Diffstat:
-rw-r--r--  .mailmap | 27
-rw-r--r--  CREDITS | 4
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon | 94
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-panfrost-profiling | 10
-rw-r--r--  Documentation/admin-guide/hw-vuln/spectre.rst | 44
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 15
-rw-r--r--  Documentation/admin-guide/mm/zswap.rst | 4
-rw-r--r--  Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst | 597
-rw-r--r--  Documentation/arch/x86/resctrl.rst | 2
-rw-r--r--  Documentation/dev-tools/testing-overview.rst | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/keystone-gate.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/keystone-pll.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/adpll.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/apll.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/autoidle.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/clockdomain.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/composite.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/divider.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/dpll.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/fapll.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/gate.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/interface.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/mux.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml | 8
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml | 55
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml | 39
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml | 62
-rw-r--r--  Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml | 25
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-common-dual.yaml | 47
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/display/panel/raydium,rm69380.yaml | 89
-rw-r--r--  Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/dts-coding-style.rst | 2
-rw-r--r--  Documentation/devicetree/bindings/eeprom/at24.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml | 147
-rw-r--r--  Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/soc/rockchip/grf.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/ufs/qcom,ufs.yaml | 38
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.yaml | 4
-rw-r--r--  Documentation/driver-api/dma-buf.rst | 2
-rw-r--r--  Documentation/driver-api/virtio/writing_virtio_drivers.rst | 1
-rw-r--r--  Documentation/filesystems/bcachefs/index.rst | 11
-rw-r--r--  Documentation/filesystems/index.rst | 1
-rw-r--r--  Documentation/gpu/amdgpu/debugging.rst | 80
-rw-r--r--  Documentation/gpu/amdgpu/display/display-contributing.rst | 2
-rw-r--r--  Documentation/gpu/amdgpu/index.rst | 1
-rw-r--r--  Documentation/gpu/driver-uapi.rst | 5
-rw-r--r--  Documentation/gpu/drm-kms.rst | 22
-rw-r--r--  Documentation/gpu/i915.rst | 9
-rw-r--r--  Documentation/gpu/panfrost.rst | 9
-rw-r--r--  Documentation/gpu/rfc/i915_vm_bind.h | 11
-rw-r--r--  Documentation/kbuild/llvm.rst | 2
-rw-r--r--  Documentation/mm/page_owner.rst | 73
-rw-r--r--  Documentation/networking/devlink/devlink-eswitch-attr.rst | 76
-rw-r--r--  Documentation/networking/devlink/index.rst | 1
-rw-r--r--  Documentation/networking/representors.rst | 1
-rw-r--r--  Documentation/process/embargoed-hardware-issues.rst | 2
-rw-r--r--  Documentation/rust/arch-support.rst | 2
-rw-r--r--  Documentation/timers/no_hz.rst | 7
-rw-r--r--  Documentation/virt/kvm/x86/amd-memory-encryption.rst | 42
-rw-r--r--  Documentation/virt/kvm/x86/msr.rst | 19
-rw-r--r--  MAINTAINERS | 408
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/Kconfig | 20
-rw-r--r--  arch/arc/Kconfig | 1
-rw-r--r--  arch/arc/boot/Makefile | 4
-rw-r--r--  arch/arc/boot/dts/axc003.dtsi | 4
-rw-r--r--  arch/arc/boot/dts/hsdk.dts | 1
-rw-r--r--  arch/arc/boot/dts/vdk_axs10x_mb.dtsi | 2
-rw-r--r--  arch/arc/include/asm/cachetype.h | 9
-rw-r--r--  arch/arc/include/asm/dsp.h | 2
-rw-r--r--  arch/arc/include/asm/entry-compact.h | 10
-rw-r--r--  arch/arc/include/asm/entry.h | 4
-rw-r--r--  arch/arc/include/asm/irq.h | 2
-rw-r--r--  arch/arc/include/asm/irqflags-compact.h | 2
-rw-r--r--  arch/arc/include/asm/mmu_context.h | 2
-rw-r--r--  arch/arc/include/asm/pgtable-bits-arcv2.h | 2
-rw-r--r--  arch/arc/include/asm/ptrace.h | 2
-rw-r--r--  arch/arc/include/asm/shmparam.h | 2
-rw-r--r--  arch/arc/include/asm/smp.h | 4
-rw-r--r--  arch/arc/include/asm/thread_info.h | 2
-rw-r--r--  arch/arc/include/uapi/asm/swab.h | 2
-rw-r--r--  arch/arc/kernel/entry-arcv2.S | 8
-rw-r--r--  arch/arc/kernel/entry.S | 4
-rw-r--r--  arch/arc/kernel/head.S | 2
-rw-r--r--  arch/arc/kernel/intc-arcv2.c | 2
-rw-r--r--  arch/arc/kernel/kprobes.c | 7
-rw-r--r--  arch/arc/kernel/perf_event.c | 2
-rw-r--r--  arch/arc/kernel/setup.c | 2
-rw-r--r--  arch/arc/kernel/signal.c | 7
-rw-r--r--  arch/arc/kernel/traps.c | 2
-rw-r--r--  arch/arc/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/arc/mm/tlb.c | 4
-rw-r--r--  arch/arc/mm/tlbex.S | 8
-rw-r--r--  arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts | 8
-rw-r--r--  arch/arm/boot/dts/microchip/at91-sama7g5ek.dts | 8
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx7s-warp.dts | 1
-rw-r--r--  arch/arm/include/asm/mman.h | 14
-rw-r--r--  arch/arm/mach-omap2/board-n8x0.c | 23
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi | 16
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi | 40
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi | 16
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt2712-evb.dts | 8
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt2712e.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622.dtsi | 34
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts | 6
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8183.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8192.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi | 36
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8195.dtsi | 5
-rw-r--r--  arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sc7280.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sc8180x.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sc8280xp.dtsi | 11
-rw-r--r--  arch/arm64/boot/dts/qcom/sm6350.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sm6375.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8250.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8450.dtsi | 16
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8550.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8650.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/qcom/x1e80100.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 53
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts | 6
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts | 1
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 20
-rw-r--r--  arch/arm64/kernel/head.S | 36
-rw-r--r--  arch/arm64/kernel/ptrace.c | 5
-rw-r--r--  arch/arm64/kvm/arm.c | 13
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/tlb.c | 3
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c | 23
-rw-r--r--  arch/arm64/kvm/hyp/vhe/tlb.c | 3
-rw-r--r--  arch/arm64/kvm/mmu.c | 2
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 5
-rw-r--r--  arch/arm64/mm/pageattr.c | 3
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 4
-rw-r--r--  arch/hexagon/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/loongarch/Kconfig | 2
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k1000.dtsi | 7
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k2000-ref.dts | 33
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k2000.dtsi | 24
-rw-r--r--  arch/loongarch/include/asm/addrspace.h | 1
-rw-r--r--  arch/loongarch/include/asm/crash_reserve.h (renamed from arch/loongarch/include/asm/crash_core.h) | 4
-rw-r--r--  arch/loongarch/include/asm/io.h | 20
-rw-r--r--  arch/loongarch/include/asm/kfence.h | 9
-rw-r--r--  arch/loongarch/include/asm/page.h | 26
-rw-r--r--  arch/loongarch/include/asm/perf_event.h | 8
-rw-r--r--  arch/loongarch/include/asm/tlb.h | 2
-rw-r--r--  arch/loongarch/kernel/perf_event.c | 2
-rw-r--r--  arch/loongarch/mm/fault.c | 4
-rw-r--r--  arch/loongarch/mm/mmap.c | 4
-rw-r--r--  arch/loongarch/mm/pgtable.c | 4
-rw-r--r--  arch/m68k/include/asm/pgtable.h | 2
-rw-r--r--  arch/mips/Kconfig | 18
-rw-r--r--  arch/mips/include/asm/ptrace.h | 2
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 1
-rw-r--r--  arch/mips/kernel/ptrace.c | 15
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 23
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 3
-rw-r--r--  arch/mips/kernel/scall64-n64.S | 3
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 33
-rw-r--r--  arch/nios2/kernel/prom.c | 6
-rw-r--r--  arch/parisc/configs/generic-32bit_defconfig | 2
-rw-r--r--  arch/parisc/include/asm/mman.h | 14
-rw-r--r--  arch/powerpc/crypto/chacha-p10-glue.c | 8
-rw-r--r--  arch/powerpc/include/asm/vdso/gettimeofday.h | 3
-rw-r--r--  arch/powerpc/kernel/iommu.c | 7
-rw-r--r--  arch/riscv/Kconfig.errata | 8
-rw-r--r--  arch/riscv/Makefile | 2
-rw-r--r--  arch/riscv/errata/thead/errata.c | 24
-rw-r--r--  arch/riscv/include/asm/errata_list.h | 20
-rw-r--r--  arch/riscv/include/asm/page.h | 2
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 8
-rw-r--r--  arch/riscv/include/asm/syscall_wrapper.h | 3
-rw-r--r--  arch/riscv/include/asm/uaccess.h | 4
-rw-r--r--  arch/riscv/include/uapi/asm/auxvec.h | 2
-rw-r--r--  arch/riscv/include/uapi/asm/hwprobe.h | 2
-rw-r--r--  arch/riscv/kernel/compat_vdso/Makefile | 2
-rw-r--r--  arch/riscv/kernel/patch.c | 8
-rw-r--r--  arch/riscv/kernel/process.c | 5
-rw-r--r--  arch/riscv/kernel/signal.c | 15
-rw-r--r--  arch/riscv/kernel/traps.c | 2
-rw-r--r--  arch/riscv/kernel/vdso/Makefile | 1
-rw-r--r--  arch/riscv/kvm/aia_aplic.c | 37
-rw-r--r--  arch/riscv/kvm/vcpu_onereg.c | 2
-rw-r--r--  arch/riscv/mm/init.c | 2
-rw-r--r--  arch/riscv/mm/tlbflush.c | 4
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 16
-rw-r--r--  arch/s390/include/asm/atomic.h | 44
-rw-r--r--  arch/s390/include/asm/atomic_ops.h | 22
-rw-r--r--  arch/s390/include/asm/preempt.h | 36
-rw-r--r--  arch/s390/kernel/entry.S | 4
-rw-r--r--  arch/s390/kernel/perf_pai_crypto.c | 10
-rw-r--r--  arch/s390/kernel/perf_pai_ext.c | 10
-rw-r--r--  arch/s390/mm/fault.c | 2
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 46
-rw-r--r--  arch/x86/Kbuild | 2
-rw-r--r--  arch/x86/Kconfig | 31
-rw-r--r--  arch/x86/Makefile | 2
-rw-r--r--  arch/x86/boot/compressed/efi_mixed.S | 20
-rw-r--r--  arch/x86/coco/core.c | 93
-rw-r--r--  arch/x86/entry/common.c | 75
-rw-r--r--  arch/x86/entry/entry_64.S | 61
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 16
-rw-r--r--  arch/x86/entry/entry_fred.c | 10
-rw-r--r--  arch/x86/entry/syscall_32.c | 21
-rw-r--r--  arch/x86/entry/syscall_64.c | 19
-rw-r--r--  arch/x86/entry/syscall_x32.c | 10
-rw-r--r--  arch/x86/entry/vdso/Makefile | 1
-rw-r--r--  arch/x86/events/amd/core.c | 39
-rw-r--r--  arch/x86/events/amd/lbr.c | 16
-rw-r--r--  arch/x86/events/core.c | 1
-rw-r--r--  arch/x86/events/intel/ds.c | 8
-rw-r--r--  arch/x86/events/intel/lbr.c | 1
-rw-r--r--  arch/x86/hyperv/hv_apic.c | 16
-rw-r--r--  arch/x86/hyperv/hv_proc.c | 22
-rw-r--r--  arch/x86/include/asm/alternative.h | 4
-rw-r--r--  arch/x86/include/asm/apic.h | 3
-rw-r--r--  arch/x86/include/asm/asm-prototypes.h | 1
-rw-r--r--  arch/x86/include/asm/barrier.h | 3
-rw-r--r--  arch/x86/include/asm/coco.h | 3
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 8
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 15
-rw-r--r--  arch/x86/include/asm/crash_reserve.h | 2
-rw-r--r--  arch/x86/include/asm/disabled-features.h | 3
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 1
-rw-r--r--  arch/x86/include/asm/msr-index.h | 9
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 38
-rw-r--r--  arch/x86/include/asm/perf_event.h | 1
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 3
-rw-r--r--  arch/x86/include/asm/required-features.h | 3
-rw-r--r--  arch/x86/include/asm/sev.h | 8
-rw-r--r--  arch/x86/include/asm/syscall.h | 11
-rw-r--r--  arch/x86/include/asm/x86_init.h | 3
-rw-r--r--  arch/x86/include/uapi/asm/kvm.h | 23
-rw-r--r--  arch/x86/include/uapi/asm/kvm_para.h | 1
-rw-r--r--  arch/x86/kernel/apic/apic.c | 6
-rw-r--r--  arch/x86/kernel/callthunks.c | 4
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 56
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 170
-rw-r--r--  arch/x86/kernel/cpu/common.c | 70
-rw-r--r--  arch/x86/kernel/cpu/cpuid-deps.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mce/core.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/resctrl/internal.h | 3
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 2
-rw-r--r--  arch/x86/kernel/cpu/topology.c | 7
-rw-r--r--  arch/x86/kernel/cpu/topology_amd.c | 51
-rw-r--r--  arch/x86/kernel/eisa.c | 3
-rw-r--r--  arch/x86/kernel/kvm.c | 11
-rw-r--r--  arch/x86/kernel/nmi.c | 24
-rw-r--r--  arch/x86/kernel/probe_roms.c | 10
-rw-r--r--  arch/x86/kernel/process_64.c | 2
-rw-r--r--  arch/x86/kernel/setup.c | 5
-rw-r--r--  arch/x86/kernel/sev-shared.c | 6
-rw-r--r--  arch/x86/kernel/sev.c | 37
-rw-r--r--  arch/x86/kernel/x86_init.c | 2
-rw-r--r--  arch/x86/kvm/Kconfig | 1
-rw-r--r--  arch/x86/kvm/Makefile | 5
-rw-r--r--  arch/x86/kvm/cpuid.c | 43
-rw-r--r--  arch/x86/kvm/cpuid.h | 10
-rw-r--r--  arch/x86/kvm/lapic.c | 3
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 11
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c | 51
-rw-r--r--  arch/x86/kvm/pmu.c | 16
-rw-r--r--  arch/x86/kvm/reverse_cpuid.h | 5
-rw-r--r--  arch/x86/kvm/svm/sev.c | 62
-rw-r--r--  arch/x86/kvm/svm/svm.c | 17
-rw-r--r--  arch/x86/kvm/svm/svm.h | 3
-rw-r--r--  arch/x86/kvm/svm/vmenter.S | 97
-rw-r--r--  arch/x86/kvm/trace.h | 10
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c | 2
-rw-r--r--  arch/x86/kvm/vmx/vmenter.S | 2
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 41
-rw-r--r--  arch/x86/kvm/vmx/vmx.h | 6
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  arch/x86/lib/retpoline.S | 22
-rw-r--r--  arch/x86/mm/ident_map.c | 23
-rw-r--r--  arch/x86/mm/mem_encrypt_amd.c | 18
-rw-r--r--  arch/x86/mm/numa_32.c | 1
-rw-r--r--  arch/x86/mm/pat/memtype.c | 49
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 19
-rw-r--r--  arch/x86/virt/Makefile | 2
-rw-r--r--  arch/x86/virt/svm/sev.c | 26
-rw-r--r--  block/bdev.c | 115
-rw-r--r--  block/blk-cgroup.c | 9
-rw-r--r--  block/blk-cgroup.h | 2
-rw-r--r--  block/blk-core.c | 3
-rw-r--r--  block/blk-iocost.c | 14
-rw-r--r--  block/blk-merge.c | 2
-rw-r--r--  block/blk-mq.c | 9
-rw-r--r--  block/blk-settings.c | 19
-rw-r--r--  block/blk.h | 1
-rw-r--r--  block/ioctl.c | 8
-rw-r--r--  crypto/asymmetric_keys/mscode_parser.c | 3
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c | 4
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 3
-rw-r--r--  crypto/asymmetric_keys/signature.c | 2
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 8
-rw-r--r--  crypto/testmgr.h | 80
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.c | 2
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 40
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.h | 3
-rw-r--r--  drivers/accel/ivpu/ivpu_hw.h | 6
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx.c | 11
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx.c | 6
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.c | 8
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.c | 8
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c | 14
-rw-r--r--  drivers/accel/qaic/Makefile | 5
-rw-r--r--  drivers/accel/qaic/qaic.h | 9
-rw-r--r--  drivers/accel/qaic/qaic_data.c | 9
-rw-r--r--  drivers/accel/qaic/qaic_debugfs.c | 338
-rw-r--r--  drivers/accel/qaic/qaic_debugfs.h | 20
-rw-r--r--  drivers/accel/qaic/qaic_drv.c | 26
-rw-r--r--  drivers/accel/qaic/sahara.c | 449
-rw-r--r--  drivers/accel/qaic/sahara.h | 10
-rw-r--r--  drivers/accessibility/speakup/main.c | 2
-rw-r--r--  drivers/acpi/acpica/dbnames.c | 8
-rw-r--r--  drivers/acpi/apei/einj-core.c | 2
-rw-r--r--  drivers/acpi/cppc_acpi.c | 57
-rw-r--r--  drivers/acpi/scan.c | 3
-rw-r--r--  drivers/acpi/thermal.c | 22
-rw-r--r--  drivers/acpi/x86/s2idle.c | 8
-rw-r--r--  drivers/android/binder.c | 4
-rw-r--r--  drivers/ata/ahci.c | 85
-rw-r--r--  drivers/ata/ahci_st.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 2
-rw-r--r--  drivers/ata/libata-eh.c | 5
-rw-r--r--  drivers/ata/libata-scsi.c | 16
-rw-r--r--  drivers/ata/pata_macio.c | 3
-rw-r--r--  drivers/ata/sata_gemini.c | 5
-rw-r--r--  drivers/ata/sata_mv.c | 63
-rw-r--r--  drivers/ata/sata_sx4.c | 6
-rw-r--r--  drivers/base/core.c | 26
-rw-r--r--  drivers/base/devcoredump.c | 23
-rw-r--r--  drivers/base/regmap/regcache-maple.c | 6
-rw-r--r--  drivers/block/null_blk/main.c | 4
-rw-r--r--  drivers/bluetooth/btmtk.c | 7
-rw-r--r--  drivers/bluetooth/btqca.c | 46
-rw-r--r--  drivers/bluetooth/btusb.c | 11
-rw-r--r--  drivers/bluetooth/hci_qca.c | 46
-rw-r--r--  drivers/cache/sifive_ccache.c | 72
-rw-r--r--  drivers/char/random.c | 10
-rw-r--r--  drivers/clk/clk.c | 173
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-infracfg.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c | 15
-rw-r--r--  drivers/comedi/drivers/vmk80xx.c | 35
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 2
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c | 10
-rw-r--r--  drivers/cxl/Kconfig | 13
-rw-r--r--  drivers/cxl/acpi.c | 13
-rw-r--r--  drivers/cxl/core/cdat.c | 152
-rw-r--r--  drivers/cxl/core/mbox.c | 39
-rw-r--r--  drivers/cxl/core/port.c | 114
-rw-r--r--  drivers/cxl/core/regs.c | 5
-rw-r--r--  drivers/cxl/cxl.h | 8
-rw-r--r--  drivers/cxl/cxlmem.h | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 56
-rw-r--r--  drivers/dma-buf/st-dma-fence-chain.c | 6
-rw-r--r--  drivers/dma/idma64.c | 4
-rw-r--r--  drivers/dma/idxd/cdev.c | 5
-rw-r--r--  drivers/dma/idxd/debugfs.c | 4
-rw-r--r--  drivers/dma/idxd/device.c | 8
-rw-r--r--  drivers/dma/idxd/idxd.h | 2
-rw-r--r--  drivers/dma/idxd/init.c | 2
-rw-r--r--  drivers/dma/idxd/irq.c | 4
-rw-r--r--  drivers/dma/idxd/perfmon.c | 9
-rw-r--r--  drivers/dma/owl-dma.c | 4
-rw-r--r--  drivers/dma/pl330.c | 3
-rw-r--r--  drivers/dma/tegra186-gpc-dma.c | 3
-rw-r--r--  drivers/dma/xilinx/xdma-regs.h | 3
-rw-r--r--  drivers/dma/xilinx/xdma.c | 42
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 13
-rw-r--r--  drivers/dpll/Kconfig | 2
-rw-r--r--  drivers/dpll/dpll_core.c | 58
-rw-r--r--  drivers/firewire/ohci.c | 6
-rw-r--r--  drivers/firmware/arm_ffa/driver.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/powercap.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/raw_mode.c | 7
-rw-r--r--  drivers/firmware/efi/libstub/randomalloc.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 1
-rw-r--r--  drivers/firmware/qcom/qcom_qseecom_uefisecapp.c | 137
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c | 37
-rw-r--r--  drivers/gpio/gpio-crystalcove.c | 2
-rw-r--r--  drivers/gpio/gpio-lpc32xx.c | 1
-rw-r--r--  drivers/gpio/gpio-tangier.c | 9
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 20
-rw-r--r--  drivers/gpio/gpio-wcove.c | 2
-rw-r--r--  drivers/gpio/gpiolib-cdev.c | 58
-rw-r--r--  drivers/gpio/gpiolib.c | 35
-rw-r--r--  drivers/gpu/drm/Kconfig | 55
-rw-r--r--  drivers/gpu/drm/Makefile | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 169
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 71
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 360
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 187
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 506
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 153
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 133
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 145
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c | 112
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 154
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 153
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 416
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 96
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 98
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 1014
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 214
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stat.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 225
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 179
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane_priv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_opp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c | 56
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c | 207
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 269
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 161
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 70
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/Makefile | 77
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h) | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c) | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c) | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c) | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h) | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c) | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 112
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h) | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 48
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 126
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 74
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 130
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 150
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c | 182
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c) | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/optc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 75
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 165
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 125
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 221
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_id.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/include/link_service_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/include/signal_types.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 15
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h | 28
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h | 14
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h | 20
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h | 8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h | 28
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h | 18
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h | 19
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h | 10
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h | 60
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h | 27
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h | 37
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h | 16
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h | 24
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h | 10
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h | 12
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h | 10
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h | 511
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h | 1106
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v11_api_def.h | 38
-rw-r--r--  drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 233
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h | 41
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 39
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h | 1836
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h | 33
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h | 55
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h | 46
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h | 140
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h | 164
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 22
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 20
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 121
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 187
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 374
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 1796
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 67
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_component.c | 2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c | 1
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c | 5
-rw-r--r--  drivers/gpu/drm/armada/armada_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/ast/Makefile | 10
-rw-r--r--  drivers/gpu/drm/ast/ast_ddc.c (renamed from drivers/gpu/drm/ast/ast_i2c.c) | 130
-rw-r--r--  drivers/gpu/drm/ast/ast_ddc.h | 11
-rw-r--r--  drivers/gpu/drm/ast/ast_dp.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 39
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 165
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 36
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511.h | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 20
-rw-r--r--  drivers/gpu/drm/bridge/analogix/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 15
-rw-r--r--  drivers/gpu/drm/bridge/cadence/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/chipone-icn6211.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/imx/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6505.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/ite-it66121.c | 25
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/microchip-lvds.c | 229
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 31
-rw-r--r--  drivers/gpu/drm/bridge/tc358764.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/tc358775.c | 104
-rw-r--r--  drivers/gpu/drm/bridge/thc63lvd1024.c | 21
-rw-r--r--  drivers/gpu/drm/bridge/ti-dlpc3433.c | 17
-rw-r--r--  drivers/gpu/drm/ci/test.yml | 6
-rw-r--r--  drivers/gpu/drm/display/Kconfig | 72
-rw-r--r--  drivers/gpu/drm/display/Makefile | 6
-rw-r--r--  drivers/gpu/drm/display/drm_dp_dual_mode_helper.c | 4
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 48
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper_internal.h | 2
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 42
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology_internal.h | 4
-rw-r--r--  drivers/gpu/drm/display/drm_dp_tunnel.c | 17
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 6
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 24
-rw-r--r--  drivers/gpu/drm/drm_buddy.c | 427
-rw-r--r--  drivers/gpu/drm/drm_client.c | 105
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 132
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 38
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 100
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper_internal.h | 15
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 13
-rw-r--r--  drivers/gpu/drm/drm_displayid.c | 7
-rw-r--r--  drivers/gpu/drm/drm_displayid_internal.h (renamed from include/drm/drm_displayid.h) | 6
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 268
-rw-r--r--  drivers/gpu/drm/drm_eld.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fb_dma_helper.c | 42
-rw-r--r--  drivers/gpu/drm/drm_fbdev_generic.c | 4
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 34
-rw-r--r--  drivers/gpu/drm/drm_gem_atomic_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 101
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 10
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 45
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 7
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 40
-rw-r--r--  drivers/gpu/drm/drm_panic.c | 585
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 56
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 7
-rw-r--r--  drivers/gpu/drm/drm_print.c | 6
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 95
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 20
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 58
-rw-r--r--  drivers/gpu/drm/drm_vblank_work.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 24
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 12
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_hwdb.c | 34
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_scaler.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 1
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 9
-rw-r--r--  drivers/gpu/drm/gma500/psb_lid.c | 80
-rw-r--r--  drivers/gpu/drm/gud/gud_connector.c | 12
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 4
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h | 273
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio_regs.h | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 293
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 160
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 242
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.h | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 53
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color_regs.h | 42
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy_regs.h | 117
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 353
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 52
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 389
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 257
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_de.h | 186
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 713
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_conversion.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 126
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 107
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_reg_defs.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 102
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_wa.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 185
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_regs.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_wl.c | 264
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c248
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c368
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h48
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c596
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c613
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h82
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc_regs.h120
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c270
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_fixed.h (renamed from drivers/gpu/drm/i915/i915_fixed.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c58
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c533
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h50
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_regs.h348
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c47
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c7
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c3
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c322
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h14
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark_regs.h18
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h309
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c470
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c22
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_regs.h327
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c22
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c18
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c8
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c27
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c43
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c66
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h66
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_tlb.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c225
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c22
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c95
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c12
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c80
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c13
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c1
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c26
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h26
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c89
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c8
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c2
-rw-r--r--drivers/gpu/drm/i915/i915_params.c3
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c66
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c19
-rw-r--r--drivers/gpu/drm/i915/i915_query.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1407
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c6
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h14
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c52
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c60
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c2
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c21
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c14
-rw-r--r--drivers/gpu/drm/i915/intel_step.c80
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c380
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c36
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c3
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c1
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c1
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c4
-rw-r--r--drivers/gpu/drm/imx/ipuv3/Kconfig5
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c12
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig2
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.c12
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.h3
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c21
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h5
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c10
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c5
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c22
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c9
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_crtc.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gem.c13
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig6
-rw-r--r--drivers/gpu/drm/mediatek/Makefile12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_crtc.c)218
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.h28
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c)51
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.h (renamed from drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h)9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_aal.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_merge.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h30
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c34
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c33
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_gem.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_gem.c)68
-rw-r--r--drivers/gpu/drm/mediatek/mtk_gem.h (renamed from drivers/gpu/drm/mediatek/mtk_drm_gem.h)23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_padding.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_plane.c)26
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h (renamed from drivers/gpu/drm/mediatek/mtk_drm_plane.h)4
-rw-r--r--drivers/gpu/drm/meson/Kconfig2
-rw-r--r--drivers/gpu/drm/meson/meson_dw_mipi_dsi.c7
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c18
-rw-r--r--drivers/gpu/drm/msm/Kconfig8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c20
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h4
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c20
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c6
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c7
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c40
-rw-r--r--drivers/gpu/drm/panel/Kconfig64
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c164
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c228
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c1
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c1112
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-sw43408.c320
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c6
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c35
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm69380.c344
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c44
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c285
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c123
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c87
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c6
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c18
-rw-r--r--drivers/gpu/drm/panfrost/Makefile2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_debugfs.c21
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_debugfs.h14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c50
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c13
-rw-r--r--drivers/gpu/drm/panthor/Kconfig23
-rw-r--r--drivers/gpu/drm/panthor/Makefile14
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.c283
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.h21
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c561
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h357
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c1488
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c1362
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.h503
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c230
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h142
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c482
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.h52
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c597
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.h39
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c2774
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h102
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h239
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c3499
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h50
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c26
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c50
-rw-r--r--drivers/gpu/drm/radeon/Kconfig8
-rw-r--r--drivers/gpu/drm/radeon/pptable.h10
-rw-r--r--drivers/gpu/drm/radeon/r100.c1
-rw-r--r--drivers/gpu/drm/radeon/r300.c1
-rw-r--r--drivers/gpu/drm/radeon/r420.c1
-rw-r--r--drivers/gpu/drm/radeon/r600.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/rs400.c1
-rw-r--r--drivers/gpu/drm/radeon/rv515.c1
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/Kconfig2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig10
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c34
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h2
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c12
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c12
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c12
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c1
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c18
-rw-r--r--drivers/gpu/drm/tegra/Kconfig8
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c171
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c3
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c6
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c235
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c38
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c20
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c5
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c33
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h30
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c9
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c52
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c94
-rw-r--r--drivers/gpu/drm/v3d/v3d_sysfs.c13
-rw-r--r--drivers/gpu/drm/vc4/Kconfig10
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c48
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h34
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c59
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c63
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h40
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c39
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c42
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c110
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c632
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h75
-rw-r--r--drivers/gpu/drm/xe/Kconfig15
-rw-r--r--drivers/gpu/drm/xe/Makefile17
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h200
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h10
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h57
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h26
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h6
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c16
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c16
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c24
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c4
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c39
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c240
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c7
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfx_state_commands.h18
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h3
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_instr_defs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h5
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h7
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h65
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gtt_defs.h37
-rw-r--r--drivers/gpu/drm/xe/regs/xe_guc_regs.h15
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h19
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_sriov_regs.h3
-rw-r--r--drivers/gpu/drm/xe/tests/Makefile3
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c12
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c57
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c136
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c27
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c96
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c1
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c6
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c192
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h74
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c4
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c24
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c47
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h6
-rw-r--r--drivers/gpu/drm/xe/xe_device.c237
-rw-r--r--drivers/gpu/drm/xe/xe_device.h13
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c16
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c7
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c8
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c93
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c76
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h13
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c136
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h8
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c100
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c15
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_submit.c15
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_submit.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c63
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.c5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c242
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c63
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c43
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c39
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c52
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h20
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c1977
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h56
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h54
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c257
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h27
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h35
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c418
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h25
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h31
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h34
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sysfs.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sysfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c16
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c44
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c115
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h11
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c122
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c137
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c135
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c9
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.c7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_id_mgr.c279
-rw-r--r--drivers/gpu/drm/xe/xe_guc_id_mgr.h22
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_helpers.c134
-rw-r--r--drivers/gpu/drm/xe/xe_guc_klv_helpers.h51
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c5
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c116
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c232
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit_types.h13
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h21
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c253
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h11
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c13
-rw-r--r--drivers/gpu/drm/xe/xe_huc_debugfs.c5
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c46
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c155
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h7
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c270
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c3
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c6
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c194
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h5
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c9
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c16
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c144
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h82
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c66
-rw-r--r--drivers/gpu/drm/xe/xe_module.c7
-rw-r--r--drivers/gpu/drm/xe/xe_module.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c23
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c44
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c117
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h6
-rw-r--r--drivers/gpu/drm/xe/xe_platform_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c327
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h13
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c38
-rw-r--r--drivers/gpu/drm/xe/xe_query.c55
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c22
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c5
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c33
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.h3
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c62
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c104
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_helpers.h46
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c7
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h1
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c17
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.c17
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h6
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c15
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.c5
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c18
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.h1
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c10
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c11
-rw-r--r--drivers/gpu/drm/xe/xe_uc_debugfs.c2
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c53
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.h8
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c347
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h8
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c20
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.h2
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c134
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules11
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig8
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c231
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.h17
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp_regs.h8
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c85
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c2
-rw-r--r--drivers/gpu/host1x/bus.c8
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-mcp2221.c2
-rw-r--r--drivers/hid/hid-nintendo.c8
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c38
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c2
-rw-r--r--drivers/hv/channel.c29
-rw-r--r--drivers/hv/connection.c29
-rw-r--r--drivers/hv/vmbus_drv.c94
-rw-r--r--drivers/i2c/busses/i2c-i801.c7
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/i2c-core-base.c12
-rw-r--r--drivers/infiniband/core/cm.c11
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c2
-rw-r--r--drivers/interconnect/core.c8
-rw-r--r--drivers/interconnect/qcom/x1e80100.c26
-rw-r--r--drivers/iommu/amd/init.c25
-rw-r--r--drivers/iommu/amd/iommu.c11
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c38
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h2
-rw-r--r--drivers/iommu/intel/iommu.c11
-rw-r--r--drivers/iommu/intel/perfmon.c2
-rw-r--r--drivers/iommu/intel/svm.c2
-rw-r--r--drivers/iommu/iommu.c11
-rw-r--r--drivers/iommu/iommufd/Kconfig1
-rw-r--r--drivers/iommu/mtk_iommu.c1
-rw-r--r--drivers/iommu/mtk_iommu_v1.c1
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c17
-rw-r--r--drivers/isdn/mISDN/socket.c10
-rw-r--r--drivers/md/dm-integrity.c2
-rw-r--r--drivers/md/dm-vdo/murmurhash3.c35
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c11
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c2
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c2
-rw-r--r--drivers/misc/eeprom/at24.c18
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/platform-vsc.c17
-rw-r--r--drivers/misc/mei/vsc-tp.c84
-rw-r--r--drivers/misc/mei/vsc-tp.h3
-rw-r--r--drivers/mmc/core/block.c4
-rw-r--r--drivers/mmc/host/moxart-mmc.c1
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/sdhci-msm.c16
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c29
-rw-r--r--drivers/mmc/host/sdhci-omap.c3
-rw-r--r--drivers/mtd/devices/block2mtd.c2
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c7
-rw-r--r--drivers/net/dsa/mt7530.c267
-rw-r--r--drivers/net/dsa/mt7530.h10
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c62
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h23
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c35
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_xdp.c4
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c13
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h2
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c3
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c64
-rw-r--r--drivers/net/ethernet/broadcom/b44.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c84
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c16
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c38
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c182
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c82
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c45
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_leds.c38
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h5
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h3
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c16
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c11
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c11
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c18
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c61
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.h6
-rw-r--r--drivers/net/ethernet/realtek/r8169_leds.c35
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c49
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c103
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c47
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c56
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c29
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c18
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c5
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/geneve.c4
-rw-r--r--drivers/net/gtp.c3
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/macsec.c46
-rw-r--r--drivers/net/phy/dp83869.c3
-rw-r--r--drivers/net/phy/mediatek-ge-soc.c43
-rw-r--r--drivers/net/phy/micrel.c31
-rw-r--r--drivers/net/phy/qcom/at803x.c4
-rw-r--r--drivers/net/tun.c18
-rw-r--r--drivers/net/usb/ax88179_178a.c17
-rw-r--r--drivers/net/usb/qmi_wwan.c4
-rw-r--r--drivers/net/virtio_net.c26
-rw-r--r--drivers/net/vxlan/vxlan_core.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c2
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.c4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.c8
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/nfc/trf7970a.c42
-rw-r--r--drivers/nvme/host/core.c41
-rw-r--r--drivers/nvme/host/fc.c4
-rw-r--r--drivers/nvme/host/nvme.h12
-rw-r--r--drivers/nvme/host/zns.c33
-rw-r--r--drivers/nvme/target/configfs.c47
-rw-r--r--drivers/nvme/target/core.c7
-rw-r--r--drivers/nvme/target/fc.c17
-rw-r--r--drivers/of/dynamic.c12
-rw-r--r--drivers/of/module.c8
-rw-r--r--drivers/pci/quirks.c8
-rw-r--r--drivers/perf/riscv_pmu.c4
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c6
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c9
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c12
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h1
-rw-r--r--drivers/phy/rockchip/Kconfig1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c36
-rw-r--r--drivers/phy/rockchip/phy-rockchip-snps-pcie3.c31
-rw-r--r--drivers/phy/ti/phy-tusb1210.c23
-rw-r--r--drivers/pinctrl/aspeed/Makefile2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c28
-rw-r--r--drivers/platform/x86/acer-wmi.c9
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c9
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile2
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c7
-rw-r--r--drivers/platform/x86/amd/pmf/core.c1
-rw-r--r--drivers/platform/x86/amd/pmf/pmf-quirks.c51
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h3
-rw-r--r--drivers/platform/x86/intel/hid.c9
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c1
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c4
-rw-r--r--drivers/platform/x86/intel/vbtn.c11
-rw-r--r--drivers/platform/x86/lg-laptop.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/pwm/core.c2
-rw-r--r--drivers/pwm/pwm-dwc-core.c1
-rw-r--r--drivers/pwm/pwm-dwc.c88
-rw-r--r--drivers/pwm/pwm-dwc.h6
-rw-r--r--drivers/pwm/pwm-img.c4
-rw-r--r--drivers/ras/amd/fmpm.c57
-rw-r--r--drivers/ras/debugfs.h4
-rw-r--r--drivers/regulator/tps65132-regulator.c7
-rw-r--r--drivers/s390/cio/device.c13
-rw-r--r--drivers/s390/cio/device_fsm.c5
-rw-r--r--drivers/s390/cio/device_ops.c8
-rw-r--r--drivers/s390/cio/qdio_main.c28
-rw-r--r--drivers/s390/net/ism_drv.c37
-rw-r--r--drivers/s390/net/qeth_core_main.c38
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c2
-rw-r--r--drivers/scsi/ch.c20
-rw-r--r--drivers/scsi/cxlflash/main.c17
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c10
-rw-r--r--drivers/scsi/hosts.c7
-rw-r--r--drivers/scsi/libsas/sas_expander.c53
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c45
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c99
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c10
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c2
-rw-r--r--drivers/scsi/myrb.c20
-rw-r--r--drivers/scsi/myrs.c24
-rw-r--r--drivers/scsi/pmcraid.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c128
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi_lib.c7
-rw-r--r--drivers/scsi/scsi_scan.c34
-rw-r--r--drivers/scsi/sd.c25
-rw-r--r--drivers/scsi/sg.c38
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/soc/mediatek/Kconfig1
-rw-r--r--drivers/soc/mediatek/mtk-svs.c7
-rw-r--r--drivers/soundwire/amd_manager.c15
-rw-r--r--drivers/soundwire/amd_manager.h3
-rw-r--r--drivers/spi/spi-fsl-lpspi.c14
-rw-r--r--drivers/spi/spi-pci1xxxx.c2
-rw-r--r--drivers/spi/spi-s3c64xx.c5
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c3
-rw-r--r--drivers/target/target_core_configfs.c12
-rw-r--r--drivers/thermal/devfreq_cooling.c2
-rw-r--r--drivers/thermal/gov_power_allocator.c14
-rw-r--r--drivers/thermal/thermal_debugfs.c1
-rw-r--r--drivers/thermal/thermal_trip.c19
-rw-r--r--drivers/thunderbolt/switch.c48
-rw-r--r--drivers/thunderbolt/tb.c10
-rw-r--r--drivers/thunderbolt/tb.h3
-rw-r--r--drivers/thunderbolt/usb4.c13
-rw-r--r--drivers/tty/serial/8250/8250_dw.c6
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/mxs-auart.c8
-rw-r--r--drivers/tty/serial/pmac_zilog.c14
-rw-r--r--drivers/tty/serial/serial_base.h4
-rw-r--r--drivers/tty/serial/serial_core.c23
-rw-r--r--drivers/tty/serial/serial_port.c34
-rw-r--r--drivers/tty/serial/stm32-usart.c13
-rw-r--r--drivers/ufs/core/ufs-mcq.c2
-rw-r--r--drivers/ufs/core/ufshcd.c9
-rw-r--r--drivers/ufs/host/ufs-qcom.c14
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c4
-rw-r--r--drivers/uio/uio_hv_generic.c12
-rw-r--r--drivers/uio/uio_pruss.c2
-rw-r--r--drivers/usb/core/hub.c23
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/port.c42
-rw-r--r--drivers/usb/core/sysfs.c16
-rw-r--r--drivers/usb/dwc2/core.h14
-rw-r--r--drivers/usb/dwc2/core_intr.c72
-rw-r--r--drivers/usb/dwc2/gadget.c10
-rw-r--r--drivers/usb/dwc2/hcd.c49
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c19
-rw-r--r--drivers/usb/dwc2/hw.h2
-rw-r--r--drivers/usb/dwc2/platform.c2
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/ep0.c3
-rw-r--r--drivers/usb/dwc3/gadget.c10
-rw-r--r--drivers/usb/dwc3/host.c11
-rw-r--r--drivers/usb/gadget/function/f_fs.c29
-rw-r--r--drivers/usb/gadget/function/f_ncm.c4
-rw-r--r--drivers/usb/gadget/udc/core.c4
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c5
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci-trace.h12
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c6
-rw-r--r--drivers/usb/misc/usb-ljca.c22
-rw-r--r--drivers/usb/phy/phy-generic.c7
-rw-r--r--drivers/usb/serial/option.c40
-rw-r--r--drivers/usb/storage/uas.c28
-rw-r--r--drivers/usb/typec/class.c7
-rw-r--r--drivers/usb/typec/mux/it5205.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c10
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c92
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h5
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c75
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c14
-rw-r--r--drivers/vdpa/vdpa.c6
-rw-r--r--drivers/vhost/vhost.c26
-rw-r--r--drivers/video/fbdev/Kconfig3
-rw-r--r--drivers/video/fbdev/core/Kconfig6
-rw-r--r--drivers/video/fbdev/core/fb_defio.c2
-rw-r--r--drivers/virt/vmgenid.c2
-rw-r--r--drivers/virtio/virtio.c6
-rw-r--r--fs/9p/fid.h3
-rw-r--r--fs/9p/v9fs.h11
-rw-r--r--fs/9p/vfs_file.c2
-rw-r--r--fs/9p/vfs_inode.c60
-rw-r--r--fs/9p/vfs_inode_dotl.c34
-rw-r--r--fs/9p/vfs_super.c19
-rw-r--r--fs/aio.c2
-rw-r--r--fs/bcachefs/Makefile3
-rw-r--r--fs/bcachefs/acl.c30
-rw-r--r--fs/bcachefs/alloc_background.c47
-rw-r--r--fs/bcachefs/alloc_foreground.c4
-rw-r--r--fs/bcachefs/alloc_types.h3
-rw-r--r--fs/bcachefs/backpointers.c194
-rw-r--r--fs/bcachefs/backpointers.h41
-rw-r--r--fs/bcachefs/bcachefs.h10
-rw-r--r--fs/bcachefs/bcachefs_format.h29
-rw-r--r--fs/bcachefs/bkey.h6
-rw-r--r--fs/bcachefs/bkey_methods.c8
-rw-r--r--fs/bcachefs/bset.c14
-rw-r--r--fs/bcachefs/bset.h2
-rw-r--r--fs/bcachefs/btree_cache.c78
-rw-r--r--fs/bcachefs/btree_gc.c531
-rw-r--r--fs/bcachefs/btree_io.c37
-rw-r--r--fs/bcachefs/btree_iter.c52
-rw-r--r--fs/bcachefs/btree_iter.h11
-rw-r--r--fs/bcachefs/btree_journal_iter.c109
-rw-r--r--fs/bcachefs/btree_journal_iter.h8
-rw-r--r--fs/bcachefs/btree_key_cache.c23
-rw-r--r--fs/bcachefs/btree_locking.c28
-rw-r--r--fs/bcachefs/btree_node_scan.c521
-rw-r--r--fs/bcachefs/btree_node_scan.h11
-rw-r--r--fs/bcachefs/btree_node_scan_types.h30
-rw-r--r--fs/bcachefs/btree_trans_commit.c39
-rw-r--r--fs/bcachefs/btree_types.h16
-rw-r--r--fs/bcachefs/btree_update.c6
-rw-r--r--fs/bcachefs/btree_update_interior.c409
-rw-r--r--fs/bcachefs/btree_update_interior.h31
-rw-r--r--fs/bcachefs/btree_write_buffer.c28
-rw-r--r--fs/bcachefs/buckets.c12
-rw-r--r--fs/bcachefs/buckets.h9
-rw-r--r--fs/bcachefs/chardev.c104
-rw-r--r--fs/bcachefs/checksum.c23
-rw-r--r--fs/bcachefs/checksum.h5
-rw-r--r--fs/bcachefs/compress.h8
-rw-r--r--fs/bcachefs/data_update.c29
-rw-r--r--fs/bcachefs/debug.c75
-rw-r--r--fs/bcachefs/ec.c54
-rw-r--r--fs/bcachefs/ec.h2
-rw-r--r--fs/bcachefs/errcode.h3
-rw-r--r--fs/bcachefs/error.c6
-rw-r--r--fs/bcachefs/error.h6
-rw-r--r--fs/bcachefs/extents.c70
-rw-r--r--fs/bcachefs/extents.h25
-rw-r--r--fs/bcachefs/eytzinger.c234
-rw-r--r--fs/bcachefs/eytzinger.h81
-rw-r--r--fs/bcachefs/fs-io-direct.c23
-rw-r--r--fs/bcachefs/fs-io.c16
-rw-r--r--fs/bcachefs/fs.c10
-rw-r--r--fs/bcachefs/fsck.c264
-rw-r--r--fs/bcachefs/inode.c2
-rw-r--r--fs/bcachefs/io_misc.c2
-rw-r--r--fs/bcachefs/journal_io.c77
-rw-r--r--fs/bcachefs/journal_reclaim.c2
-rw-r--r--fs/bcachefs/journal_seq_blacklist.c3
-rw-r--r--fs/bcachefs/journal_types.h1
-rw-r--r--fs/bcachefs/logged_ops.c7
-rw-r--r--fs/bcachefs/mean_and_variance_test.c28
-rw-r--r--fs/bcachefs/opts.c33
-rw-r--r--fs/bcachefs/opts.h21
-rw-r--r--fs/bcachefs/recovery.c415
-rw-r--r--fs/bcachefs/recovery.h32
-rw-r--r--fs/bcachefs/recovery_passes.c249
-rw-r--r--fs/bcachefs/recovery_passes.h17
-rw-r--r--fs/bcachefs/recovery_passes_types.h (renamed from fs/bcachefs/recovery_types.h)11
-rw-r--r--fs/bcachefs/reflink.c3
-rw-r--r--fs/bcachefs/replicas.c19
-rw-r--r--fs/bcachefs/sb-clean.c8
-rw-r--r--fs/bcachefs/sb-downgrade.c7
-rw-r--r--fs/bcachefs/sb-errors_types.h11
-rw-r--r--fs/bcachefs/sb-members.c53
-rw-r--r--fs/bcachefs/sb-members.h21
-rw-r--r--fs/bcachefs/snapshot.c227
-rw-r--r--fs/bcachefs/snapshot.h89
-rw-r--r--fs/bcachefs/subvolume.c72
-rw-r--r--fs/bcachefs/subvolume.h3
-rw-r--r--fs/bcachefs/subvolume_types.h2
-rw-r--r--fs/bcachefs/super-io.c20
-rw-r--r--fs/bcachefs/super.c22
-rw-r--r--fs/bcachefs/super_types.h2
-rw-r--r--fs/bcachefs/sysfs.c17
-rw-r--r--fs/bcachefs/tests.c2
-rw-r--r--fs/bcachefs/thread_with_file.c15
-rw-r--r--fs/bcachefs/thread_with_file.h3
-rw-r--r--fs/bcachefs/util.c143
-rw-r--r--fs/bcachefs/util.h22
-rw-r--r--fs/binfmt_elf_fdpic.c2
-rw-r--r--fs/btrfs/backref.c12
-rw-r--r--fs/btrfs/block-group.c3
-rw-r--r--fs/btrfs/delayed-inode.c3
-rw-r--r--fs/btrfs/extent-tree.c8
-rw-r--r--fs/btrfs/extent_io.c34
-rw-r--r--fs/btrfs/extent_map.c18
-rw-r--r--fs/btrfs/inode.c28
-rw-r--r--fs/btrfs/ioctl.c37
-rw-r--r--fs/btrfs/messages.c2
-rw-r--r--fs/btrfs/qgroup.c2
-rw-r--r--fs/btrfs/root-tree.c10
-rw-r--r--fs/btrfs/root-tree.h2
-rw-r--r--fs/btrfs/scrub.c30
-rw-r--r--fs/btrfs/tests/extent-map-tests.c5
-rw-r--r--fs/btrfs/transaction.c19
-rw-r--r--fs/btrfs/volumes.c27
-rw-r--r--fs/btrfs/zoned.c14
-rw-r--r--fs/ceph/addr.c4
-rw-r--r--fs/ceph/caps.c4
-rw-r--r--fs/ceph/mds_client.c9
-rw-r--r--fs/ceph/mds_client.h3
-rw-r--r--fs/cramfs/inode.c2
-rw-r--r--fs/erofs/super.c1
-rw-r--r--fs/exec.c1
-rw-r--r--fs/ext4/super.c8
-rw-r--r--fs/f2fs/super.c2
-rw-r--r--fs/fuse/cuse.c4
-rw-r--r--fs/fuse/dir.c1
-rw-r--r--fs/fuse/file.c12
-rw-r--r--fs/fuse/fuse_i.h7
-rw-r--r--fs/fuse/inode.c1
-rw-r--r--fs/fuse/iomode.c60
-rw-r--r--fs/gfs2/bmap.c5
-rw-r--r--fs/ioctl.c4
-rw-r--r--fs/jfs/jfs_logmgr.c4
-rw-r--r--fs/kernfs/file.c9
-rw-r--r--fs/namei.c7
-rw-r--r--fs/netfs/buffered_write.c23
-rw-r--r--fs/nfsd/nfs4callback.c26
-rw-r--r--fs/nfsd/nfs4state.c43
-rw-r--r--fs/nfsd/nfs4xdr.c47
-rw-r--r--fs/nfsd/state.h2
-rw-r--r--fs/nfsd/vfs.c3
-rw-r--r--fs/nilfs2/dir.c2
-rw-r--r--fs/ntfs3/Kconfig9
-rw-r--r--fs/ntfs3/dir.c7
-rw-r--r--fs/ntfs3/file.c8
-rw-r--r--fs/ntfs3/inode.c20
-rw-r--r--fs/ntfs3/ntfs_fs.h4
-rw-r--r--fs/ntfs3/super.c65
-rw-r--r--fs/proc/Makefile2
-rw-r--r--fs/proc/bootconfig.c12
-rw-r--r--fs/proc/page.c7
-rw-r--r--fs/reiserfs/journal.c2
-rw-r--r--fs/romfs/super.c2
-rw-r--r--fs/smb/client/cached_dir.c6
-rw-r--r--fs/smb/client/cifs_debug.c6
-rw-r--r--fs/smb/client/cifsfs.c14
-rw-r--r--fs/smb/client/cifsglob.h23
-rw-r--r--fs/smb/client/cifspdu.h4
-rw-r--r--fs/smb/client/cifsproto.h29
-rw-r--r--fs/smb/client/cifssmb.c6
-rw-r--r--fs/smb/client/connect.c180
-rw-r--r--fs/smb/client/dfs.c51
-rw-r--r--fs/smb/client/dfs.h33
-rw-r--r--fs/smb/client/dfs_cache.c53
-rw-r--r--fs/smb/client/dir.c22
-rw-r--r--fs/smb/client/file.c111
-rw-r--r--fs/smb/client/fs_context.c39
-rw-r--r--fs/smb/client/fs_context.h16
-rw-r--r--fs/smb/client/fscache.c36
-rw-r--r--fs/smb/client/fscache.h6
-rw-r--r--fs/smb/client/inode.c5
-rw-r--r--fs/smb/client/ioctl.c6
-rw-r--r--fs/smb/client/misc.c20
-rw-r--r--fs/smb/client/smb1ops.c4
-rw-r--r--fs/smb/client/smb2misc.c14
-rw-r--r--fs/smb/client/smb2ops.c114
-rw-r--r--fs/smb/client/smb2pdu.c21
-rw-r--r--fs/smb/client/smb2pdu.h2
-rw-r--r--fs/smb/client/smb2transport.c4
-rw-r--r--fs/smb/client/trace.h96
-rw-r--r--fs/smb/client/transport.c7
-rw-r--r--fs/smb/common/smb2pdu.h2
-rw-r--r--fs/smb/server/ksmbd_netlink.h38
-rw-r--r--fs/smb/server/mgmt/share_config.c7
-rw-r--r--fs/smb/server/server.c13
-rw-r--r--fs/smb/server/smb2ops.c10
-rw-r--r--fs/smb/server/smb2pdu.c18
-rw-r--r--fs/smb/server/transport_ipc.c37
-rw-r--r--fs/smb/server/vfs.c5
-rw-r--r--fs/squashfs/inode.c5
-rw-r--r--fs/super.c24
-rw-r--r--fs/sysfs/file.c2
-rw-r--r--fs/tracefs/event_inode.c14
-rw-r--r--fs/vboxsf/file.c1
-rw-r--r--fs/vboxsf/super.c9
-rw-r--r--fs/vboxsf/utils.c3
-rw-r--r--fs/xfs/libxfs/xfs_sb.c40
-rw-r--r--fs/xfs/libxfs/xfs_sb.h5
-rw-r--r--fs/xfs/scrub/common.c4
-rw-r--r--fs/xfs/xfs_aops.c7
-rw-r--r--fs/xfs/xfs_buf.c2
-rw-r--r--fs/xfs/xfs_icache.c8
-rw-r--r--fs/xfs/xfs_inode.c15
-rw-r--r--fs/xfs/xfs_super.c6
-rw-r--r--fs/xfs/xfs_trans.h9
-rw-r--r--fs/zonefs/super.c2
-rw-r--r--include/acpi/acpi_bus.h8
-rw-r--r--include/asm-generic/barrier.h8
-rw-r--r--include/asm-generic/bug.h5
-rw-r--r--include/asm-generic/export.h11
-rw-r--r--include/asm-generic/hyperv-tlfs.h19
-rw-r--r--include/asm-generic/mshyperv.h14
-rw-r--r--include/drm/amd_asic_type.h3
-rw-r--r--include/drm/bridge/samsung-dsim.h4
-rw-r--r--include/drm/display/drm_dp.h11
-rw-r--r--include/drm/display/drm_dp_helper.h51
-rw-r--r--include/drm/display/drm_dp_mst_helper.h31
-rw-r--r--include/drm/display/drm_dsc.h3
-rw-r--r--include/drm/drm_buddy.h16
-rw-r--r--include/drm/drm_client.h10
-rw-r--r--include/drm/drm_debugfs_crc.h8
-rw-r--r--include/drm/drm_edid.h45
-rw-r--r--include/drm/drm_encoder_slave.h91
-rw-r--r--include/drm/drm_fb_dma_helper.h5
-rw-r--r--include/drm/drm_format_helper.h1
-rw-r--r--include/drm/drm_gem.h3
-rw-r--r--include/drm/drm_gem_shmem_helper.h7
-rw-r--r--include/drm/drm_gem_vram_helper.h1
-rw-r--r--include/drm/drm_kunit_helpers.h2
-rw-r--r--include/drm/drm_lease.h2
-rw-r--r--include/drm/drm_mipi_dsi.h15
-rw-r--r--include/drm/drm_mode_config.h15
-rw-r--r--include/drm/drm_modeset_helper_vtables.h39
-rw-r--r--include/drm/drm_of.h1
-rw-r--r--include/drm/drm_panic.h152
-rw-r--r--include/drm/drm_plane.h10
-rw-r--r--include/drm/drm_print.h4
-rw-r--r--include/drm/drm_probe_helper.h6
-rw-r--r--include/drm/drm_suballoc.h2
-rw-r--r--include/drm/drm_vblank.h1
-rw-r--r--include/drm/gma_drm.h13
-rw-r--r--include/drm/i2c/ch7006.h1
-rw-r--r--include/drm/i2c/sil164.h1
-rw-r--r--include/drm/i915_component.h2
-rw-r--r--include/drm/i915_gsc_proxy_mei_interface.h4
-rw-r--r--include/drm/i915_hdcp_interface.h18
-rw-r--r--include/drm/i915_pciids.h4
-rw-r--r--include/drm/i915_pxp_tee_interface.h27
-rw-r--r--include/drm/ttm/ttm_bo.h17
-rw-r--r--include/drm/ttm/ttm_caching.h2
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h7
-rw-r--r--include/drm/ttm/ttm_kmap_iter.h4
-rw-r--r--include/drm/ttm/ttm_pool.h5
-rw-r--r--include/drm/ttm/ttm_resource.h6
-rw-r--r--include/drm/xe_pciids.h7
-rw-r--r--include/kvm/arm_pmu.h2
-rw-r--r--include/linux/blkdev.h13
-rw-r--r--include/linux/bootconfig.h8
-rw-r--r--include/linux/bpf.h16
-rw-r--r--include/linux/cc_platform.h12
-rw-r--r--include/linux/clk.h5
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/cpu.h11
-rw-r--r--include/linux/devcoredump.h5
-rw-r--r--include/linux/device.h1
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/dma-fence.h7
-rw-r--r--include/linux/energy_model.h1
-rw-r--r--include/linux/etherdevice.h25
-rw-r--r--include/linux/fb.h4
-rw-r--r--include/linux/firmware/qcom/qcom_qseecom.h55
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h10
-rw-r--r--include/linux/framer/framer.h4
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/gfp_types.h2
-rw-r--r--include/linux/gpio/driver.h17
-rw-r--r--include/linux/gpio/property.h1
-rw-r--r--include/linux/hyperv.h1
-rw-r--r--include/linux/interrupt.h3
-rw-r--r--include/linux/io_uring_types.h3
-rw-r--r--include/linux/irqflags.h2
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/mm.h18
-rw-r--r--include/linux/mman.h8
-rw-r--r--include/linux/oid_registry.h4
-rw-r--r--include/linux/page-flags.h144
-rw-r--r--include/linux/pagevec.h4
-rw-r--r--include/linux/peci.h1
-rw-r--r--include/linux/profile.h5
-rw-r--r--include/linux/randomize_kstack.h2
-rw-r--r--include/linux/rwbase_rt.h4
-rw-r--r--include/linux/rwsem.h6
-rw-r--r--include/linux/secretmem.h4
-rw-r--r--include/linux/shmem_fs.h9
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/linux/sockptr.h25
-rw-r--r--include/linux/stackdepot.h7
-rw-r--r--include/linux/sunrpc/svc_rdma.h13
-rw-r--r--include/linux/swapops.h65
-rw-r--r--include/linux/timecounter.h11
-rw-r--r--include/linux/timekeeping.h49
-rw-r--r--include/linux/timer.h12
-rw-r--r--include/linux/u64_stats_sync.h9
-rw-r--r--include/linux/udp.h30
-rw-r--r--include/linux/virtio.h7
-rw-r--r--include/net/addrconf.h4
-rw-r--r--include/net/af_unix.h3
-rw-r--r--include/net/bluetooth/bluetooth.h9
-rw-r--r--include/net/bluetooth/hci.h9
-rw-r--r--include/net/bluetooth/hci_core.h8
-rw-r--r--include/net/cfg80211.h2
-rw-r--r--include/net/inet_connection_sock.h1
-rw-r--r--include/net/ip_tunnels.h33
-rw-r--r--include/net/mac80211.h3
-rw-r--r--include/net/macsec.h2
-rw-r--r--include/net/mana/mana.h1
-rw-r--r--include/net/netfilter/nf_flow_table.h12
-rw-r--r--include/net/netfilter/nf_tables.h14
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--include/net/sock.h45
-rw-r--r--include/net/tls.h3
-rw-r--r--include/net/xdp_sock.h2
-rw-r--r--include/scsi/scsi_driver.h1
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/sound/hdaudio_ext.h3
-rw-r--r--include/sound/intel-nhlt.h10
-rw-r--r--include/sound/tas2781-tlv.h2
-rw-r--r--include/trace/events/mmflags.h1
-rw-r--r--include/trace/events/rpcgss.h4
-rw-r--r--include/uapi/drm/drm_mode.h11
-rw-r--r--include/uapi/drm/etnaviv_drm.h5
-rw-r--r--include/uapi/drm/i915_drm.h31
-rw-r--r--include/uapi/drm/nouveau_drm.h22
-rw-r--r--include/uapi/drm/panthor_drm.h945
-rw-r--r--include/uapi/drm/xe_drm.h25
-rw-r--r--include/uapi/linux/kfd_ioctl.h17
-rw-r--r--include/uapi/linux/vdpa.h6
-rw-r--r--include/uapi/linux/vhost.h15
-rw-r--r--include/uapi/scsi/scsi_bsg_mpi3mr.h2
-rw-r--r--include/ufs/ufshcd.h1
-rw-r--r--include/vdso/datapage.h8
-rw-r--r--init/Kconfig2
-rw-r--r--init/initramfs.c4
-rw-r--r--init/main.c7
-rw-r--r--io_uring/io_uring.c57
-rw-r--r--io_uring/kbuf.c118
-rw-r--r--io_uring/kbuf.h8
-rw-r--r--io_uring/net.c1
-rw-r--r--io_uring/rw.c9
-rw-r--r--kernel/bpf/Makefile2
-rw-r--r--kernel/bpf/arena.c25
-rw-r--r--kernel/bpf/bloom_filter.c13
-rw-r--r--kernel/bpf/helpers.c2
-rw-r--r--kernel/bpf/syscall.c35
-rw-r--r--kernel/bpf/verifier.c30
-rw-r--r--kernel/configs/hardening.config11
-rw-r--r--kernel/cpu.c13
-rw-r--r--kernel/crash_reserve.c7
-rw-r--r--kernel/dma/swiotlb.c91
-rw-r--r--kernel/fork.c33
-rw-r--r--kernel/irq/manage.c9
-rw-r--r--kernel/kprobes.c18
-rw-r--r--kernel/module/Kconfig5
-rw-r--r--kernel/power/suspend.c6
-rw-r--r--kernel/printk/printk.c6
-rw-r--r--kernel/profile.c43
-rw-r--r--kernel/sched/fair.c34
-rw-r--r--kernel/sched/isolation.c18
-rw-r--r--kernel/sched/sched.h20
-rw-r--r--kernel/sys.c7
-rw-r--r--kernel/time/posix-clock.c16
-rw-r--r--kernel/time/tick-common.c17
-rw-r--r--kernel/time/tick-sched.c54
-rw-r--r--kernel/time/tick-sched.h2
-rw-r--r--kernel/time/timer.c22
-rw-r--r--kernel/time/timer_migration.c32
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/bpf_trace.c10
-rw-r--r--kernel/trace/ring_buffer.c6
-rw-r--r--kernel/trace/trace_events.c4
-rw-r--r--kernel/trace/trace_probe.c2
-rw-r--r--kernel/vmcore_info.c5
-rw-r--r--lib/bootconfig.c22
-rw-r--r--lib/checksum_kunit.c5
-rw-r--r--lib/stackdepot.c8
-rw-r--r--lib/test_ubsan.c2
-rw-r--r--lib/ubsan.c18
-rw-r--r--mm/Makefile3
-rw-r--r--mm/filemap.c16
-rw-r--r--mm/gup.c68
-rw-r--r--mm/huge_memory.c6
-rw-r--r--mm/hugetlb.c50
-rw-r--r--mm/internal.h10
-rw-r--r--mm/madvise.c17
-rw-r--r--mm/memory-failure.c18
-rw-r--r--mm/memory.c8
-rw-r--r--mm/page_owner.c223
-rw-r--r--mm/shmem.c6
-rw-r--r--mm/shmem_quota.c10
-rw-r--r--mm/userfaultfd.c3
-rw-r--r--mm/vmalloc.c76
-rw-r--r--mm/zswap.c70
-rw-r--r--net/9p/client.c10
-rw-r--r--net/9p/trans_fd.c1
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/ax25/ax25_dev.c2
-rw-r--r--net/batman-adv/translation-table.c2
-rw-r--r--net/bluetooth/hci_conn.c6
-rw-r--r--net/bluetooth/hci_core.c6
-rw-r--r--net/bluetooth/hci_debugfs.c48
-rw-r--r--net/bluetooth/hci_event.c48
-rw-r--r--net/bluetooth/hci_request.c4
-rw-r--r--net/bluetooth/hci_sock.c21
-rw-r--r--net/bluetooth/hci_sync.c25
-rw-r--r--net/bluetooth/iso.c46
-rw-r--r--net/bluetooth/l2cap_core.c5
-rw-r--r--net/bluetooth/l2cap_sock.c59
-rw-r--r--net/bluetooth/mgmt.c24
-rw-r--r--net/bluetooth/rfcomm/sock.c14
-rw-r--r--net/bluetooth/sco.c30
-rw-r--r--net/bridge/br_input.c15
-rw-r--r--net/bridge/br_netfilter_hooks.c6
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_private.h1
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c14
-rw-r--r--net/core/dev.c8
-rw-r--r--net/core/gro.c3
-rw-r--r--net/core/sock.c4
-rw-r--r--net/core/sock_map.c6
-rw-r--r--net/ethernet/eth.c12
-rw-r--r--net/hsr/hsr_device.c13
-rw-r--r--net/hsr/hsr_slave.c3
-rw-r--r--net/ipv4/fib_frontend.c5
-rw-r--r--net/ipv4/icmp.c12
-rw-r--r--net/ipv4/inet_connection_sock.c44
-rw-r--r--net/ipv4/inet_fragment.c70
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/ip_gre.c5
-rw-r--r--net/ipv4/netfilter/Kconfig1
-rw-r--r--net/ipv4/netfilter/arp_tables.c8
-rw-r--r--net/ipv4/netfilter/ip_tables.c8
-rw-r--r--net/ipv4/nexthop.c4
-rw-r--r--net/ipv4/route.c7
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_ao.c3
-rw-r--r--net/ipv4/udp.c12
-rw-r--r--net/ipv4/udp_offload.c23
-rw-r--r--net/ipv6/addrconf.c12
-rw-r--r--net/ipv6/ip6_fib.c21
-rw-r--r--net/ipv6/ip6_gre.c3
-rw-r--r--net/ipv6/netfilter/ip6_tables.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/udp.c7
-rw-r--r--net/ipv6/udp_offload.c8
-rw-r--r--net/mac80211/cfg.c5
-rw-r--r--net/mac80211/chan.c27
-rw-r--r--net/mac80211/debug.h2
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/mesh.c8
-rw-r--r--net/mac80211/mesh.h36
-rw-r--r--net/mac80211/mesh_pathtbl.c31
-rw-r--r--net/mac80211/mlme.c46
-rw-r--r--net/mac80211/rate.c6
-rw-r--r--net/mac80211/rx.c17
-rw-r--r--net/mac80211/scan.c1
-rw-r--r--net/mac80211/tx.c13
-rw-r--r--net/mptcp/protocol.c2
-rw-r--r--net/mptcp/sockopt.c4
-rw-r--r--net/mptcp/subflow.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c6
-rw-r--r--net/netfilter/nf_flow_table_inet.c3
-rw-r--r--net/netfilter/nf_flow_table_ip.c10
-rw-r--r--net/netfilter/nf_tables_api.c174
-rw-r--r--net/netfilter/nft_chain_filter.c4
-rw-r--r--net/netfilter/nft_lookup.c1
-rw-r--r--net/netfilter/nft_set_bitmap.c4
-rw-r--r--net/netfilter/nft_set_hash.c8
-rw-r--r--net/netfilter/nft_set_pipapo.c25
-rw-r--r--net/netfilter/nft_set_rbtree.c4
-rw-r--r--net/nfc/llcp_sock.c12
-rw-r--r--net/nfc/nci/core.c5
-rw-r--r--net/openvswitch/conntrack.c9
-rw-r--r--net/rds/rdma.c2
-rw-r--r--net/sched/act_skbmod.c10
-rw-r--r--net/sched/sch_api.c2
-rw-r--r--net/sched/sch_generic.c1
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c14
-rw-r--r--net/sunrpc/svcsock.c10
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c86
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c5
-rw-r--r--net/tls/tls.h2
-rw-r--r--net/tls/tls_strp.c6
-rw-r--r--net/tls/tls_sw.c7
-rw-r--r--net/unix/af_unix.c16
-rw-r--r--net/unix/garbage.c18
-rw-r--r--net/vmw_vsock/virtio_transport.c3
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--net/wireless/trace.h6
-rw-r--r--net/wireless/wext-core.c7
-rw-r--r--net/xdp/xsk.c2
-rw-r--r--rust/Makefile1
-rw-r--r--rust/kernel/init.rs11
-rw-r--r--rust/kernel/lib.rs2
-rw-r--r--rust/kernel/net/phy.rs4
-rw-r--r--rust/macros/lib.rs12
-rw-r--r--rust/macros/module.rs190
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.extrawarn10
-rw-r--r--scripts/Makefile.modfinal2
-rwxr-xr-xscripts/bpf_doc.py4
-rw-r--r--scripts/gcc-plugins/stackleak_plugin.c2
-rw-r--r--scripts/kconfig/conf.c5
-rw-r--r--scripts/kconfig/lkc.h2
-rw-r--r--scripts/kconfig/lxdialog/checklist.c2
-rw-r--r--scripts/kconfig/lxdialog/dialog.h12
-rw-r--r--scripts/kconfig/lxdialog/inputbox.c2
-rw-r--r--scripts/kconfig/lxdialog/menubox.c2
-rw-r--r--scripts/kconfig/lxdialog/textbox.c2
-rw-r--r--scripts/kconfig/lxdialog/util.c2
-rw-r--r--scripts/kconfig/lxdialog/yesno.c2
-rw-r--r--scripts/kconfig/mconf.c4
-rw-r--r--scripts/kconfig/menu.c22
-rw-r--r--scripts/kconfig/parser.y2
-rwxr-xr-xscripts/kernel-doc2
-rw-r--r--scripts/mod/modpost.c7
-rw-r--r--security/security.c4
-rw-r--r--security/selinux/selinuxfs.c12
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c2
-rw-r--r--sound/core/seq/seq_ump_convert.c2
-rw-r--r--sound/hda/intel-nhlt.c26
-rw-r--r--sound/oss/dmasound/dmasound_paula.c8
-rw-r--r--sound/pci/emu10k1/emu10k1_callback.c7
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c6
-rw-r--r--sound/pci/hda/cs35l56_hda.c8
-rw-r--r--sound/pci/hda/cs35l56_hda_i2c.c13
-rw-r--r--sound/pci/hda/cs35l56_hda_spi.c13
-rw-r--r--sound/pci/hda/patch_realtek.c105
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c122
-rw-r--r--sound/sh/aica.c17
-rw-r--r--sound/soc/amd/acp/acp-pci.c13
-rw-r--r--sound/soc/codecs/cs-amp-lib.c5
-rw-r--r--sound/soc/codecs/cs42l43.c12
-rw-r--r--sound/soc/codecs/es8326.c37
-rw-r--r--sound/soc/codecs/es8326.h2
-rw-r--r--sound/soc/codecs/rt1316-sdw.c8
-rw-r--r--sound/soc/codecs/rt1318-sdw.c8
-rw-r--r--sound/soc/codecs/rt5682-sdw.c16
-rw-r--r--sound/soc/codecs/rt700.c16
-rw-r--r--sound/soc/codecs/rt711-sdca-sdw.c6
-rw-r--r--sound/soc/codecs/rt711-sdca.c18
-rw-r--r--sound/soc/codecs/rt711-sdw.c8
-rw-r--r--sound/soc/codecs/rt711.c16
-rw-r--r--sound/soc/codecs/rt712-sdca-dmic.c24
-rw-r--r--sound/soc/codecs/rt712-sdca-sdw.c7
-rw-r--r--sound/soc/codecs/rt712-sdca.c20
-rw-r--r--sound/soc/codecs/rt715-sdca-sdw.c2
-rw-r--r--sound/soc/codecs/rt715-sdca.c46
-rw-r--r--sound/soc/codecs/rt715-sdw.c4
-rw-r--r--sound/soc/codecs/rt715.c24
-rw-r--r--sound/soc/codecs/rt722-sdca-sdw.c4
-rw-r--r--sound/soc/codecs/rt722-sdca.c21
-rw-r--r--sound/soc/codecs/wm_adsp.c3
-rw-r--r--sound/soc/intel/avs/boards/da7219.c1
-rw-r--r--sound/soc/intel/avs/boards/dmic.c1
-rw-r--r--sound/soc/intel/avs/boards/es8336.c1
-rw-r--r--sound/soc/intel/avs/boards/i2s_test.c1
-rw-r--r--sound/soc/intel/avs/boards/max98357a.c1
-rw-r--r--sound/soc/intel/avs/boards/max98373.c1
-rw-r--r--sound/soc/intel/avs/boards/max98927.c1
-rw-r--r--sound/soc/intel/avs/boards/nau8825.c1
-rw-r--r--sound/soc/intel/avs/boards/probe.c1
-rw-r--r--sound/soc/intel/avs/boards/rt274.c1
-rw-r--r--sound/soc/intel/avs/boards/rt286.c1
-rw-r--r--sound/soc/intel/avs/boards/rt298.c1
-rw-r--r--sound/soc/intel/avs/boards/rt5514.c1
-rw-r--r--sound/soc/intel/avs/boards/rt5663.c1
-rw-r--r--sound/soc/intel/avs/boards/rt5682.c1
-rw-r--r--sound/soc/intel/avs/boards/ssm4567.c1
-rw-r--r--sound/soc/soc-ops.c2
-rw-r--r--sound/soc/sof/amd/acp.c8
-rw-r--r--sound/soc/sof/core.c14
-rw-r--r--sound/soc/sof/intel/hda-common-ops.c3
-rw-r--r--sound/soc/sof/intel/hda-dai-ops.c11
-rw-r--r--sound/soc/sof/intel/hda-dsp.c20
-rw-r--r--sound/soc/sof/intel/hda-pcm.c29
-rw-r--r--sound/soc/sof/intel/hda-stream.c70
-rw-r--r--sound/soc/sof/intel/hda.h6
-rw-r--r--sound/soc/sof/intel/lnl.c34
-rw-r--r--sound/soc/sof/intel/mtl.c14
-rw-r--r--sound/soc/sof/intel/mtl.h10
-rw-r--r--sound/soc/sof/ipc4-mtrace.c11
-rw-r--r--sound/soc/sof/ipc4-pcm.c191
-rw-r--r--sound/soc/sof/ipc4-priv.h14
-rw-r--r--sound/soc/sof/ipc4-topology.c41
-rw-r--r--sound/soc/sof/ops.h24
-rw-r--r--sound/soc/sof/pcm.c8
-rw-r--r--sound/soc/sof/sof-audio.h9
-rw-r--r--sound/soc/sof/sof-priv.h24
-rw-r--r--sound/usb/line6/driver.c6
-rw-r--r--tools/Makefile13
-rw-r--r--tools/arch/arm64/include/asm/cputype.h4
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h15
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h45
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h315
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h17
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h11
-rw-r--r--tools/arch/x86/include/asm/irq_vectors.h2
-rw-r--r--tools/arch/x86/include/asm/msr-index.h74
-rw-r--r--tools/arch/x86/include/asm/required-features.h3
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h308
-rw-r--r--tools/bpf/bpftool/gen.c2
-rw-r--r--tools/hv/hv_kvp_daemon.c213
-rw-r--r--tools/include/asm-generic/bitops/__fls.h8
-rw-r--r--tools/include/asm-generic/bitops/fls.h8
-rw-r--r--tools/include/linux/btf_ids.h2
-rw-r--r--tools/include/linux/kernel.h1
-rw-r--r--tools/include/linux/mm.h5
-rw-r--r--tools/include/linux/panic.h19
-rw-r--r--tools/include/uapi/drm/i915_drm.h16
-rw-r--r--tools/include/uapi/linux/fs.h30
-rw-r--r--tools/include/uapi/linux/kvm.h689
-rw-r--r--tools/include/uapi/sound/asound.h4
-rw-r--r--tools/lib/bpf/libbpf.c10
-rw-r--r--tools/net/ynl/lib/ynl.py1
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py7
-rw-r--r--tools/objtool/check.c2
-rw-r--r--tools/perf/arch/riscv/util/header.c2
-rw-r--r--tools/perf/ui/browsers/annotate.c2
-rw-r--r--tools/perf/util/annotate.c3
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c5
-rw-r--r--tools/power/x86/turbostat/turbostat.818
-rw-r--r--tools/power/x86/turbostat/turbostat.c2017
-rw-r--r--tools/testing/cxl/test/cxl.c10
-rw-r--r--tools/testing/kunit/configs/all_tests.config3
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_common.h2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_htab.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_list.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c2
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab.c2
-rw-r--r--tools/testing/selftests/bpf/progs/arena_list.c10
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena.c10
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena_large.c68
-rw-r--r--tools/testing/selftests/dmabuf-heaps/config3
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/settings1
-rw-r--r--tools/testing/selftests/exec/Makefile4
-rwxr-xr-xtools/testing/selftests/exec/binfmt_script.py10
-rw-r--r--tools/testing/selftests/exec/execveat.c12
-rw-r--r--tools/testing/selftests/exec/load_address.c34
-rw-r--r--tools/testing/selftests/exec/recursion-depth.c53
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc2
-rw-r--r--tools/testing/selftests/iommu/config2
-rw-r--r--tools/testing/selftests/kselftest.h43
-rw-r--r--tools/testing/selftests/kselftest_harness.h19
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer.c4
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h11
-rw-r--r--tools/testing/selftests/kvm/max_guest_memory_test.c15
-rw-r--r--tools/testing/selftests/kvm/riscv/arch_timer.c2
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_pv_test.c39
-rw-r--r--tools/testing/selftests/kvm/x86_64/pmu_counters_test.c20
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c60
-rw-r--r--tools/testing/selftests/mm/gup_test.c2
-rw-r--r--tools/testing/selftests/mm/mdwe_test.c1
-rw-r--r--tools/testing/selftests/mm/protection_keys.c34
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh2
-rw-r--r--tools/testing/selftests/mm/soft-dirty.c2
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c4
-rw-r--r--tools/testing/selftests/mm/uffd-common.c3
-rw-r--r--tools/testing/selftests/mm/uffd-common.h2
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c13
-rw-r--r--tools/testing/selftests/mm/vm_util.h2
-rw-r--r--tools/testing/selftests/net/bind_wildcard.c783
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh9
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh4
-rw-r--r--tools/testing/selftests/net/reuseaddr_conflict.c2
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/proc.c2
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/setup.c12
-rw-r--r--tools/testing/selftests/net/tcp_ao/rst.c23
-rw-r--r--tools/testing/selftests/net/tcp_ao/setsockopt-closed.c2
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_mdb.sh205
-rw-r--r--tools/testing/selftests/net/tls.c34
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh10
-rw-r--r--tools/testing/selftests/net/udpgso.c2
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c2
-rw-r--r--tools/testing/selftests/riscv/hwprobe/cbo.c2
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.h10
-rw-r--r--tools/testing/selftests/seccomp/settings2
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_test.c14
-rw-r--r--tools/testing/selftests/timers/posix_timers.c105
-rw-r--r--tools/testing/selftests/timers/valid-adjtimex.c73
-rwxr-xr-xtools/testing/selftests/turbostat/defcolumns.py60
-rw-r--r--virt/kvm/kvm_main.c3
-rw-r--r--virt/kvm/kvm_mm.h6
-rw-r--r--virt/kvm/pfncache.c50
2733 files changed, 75073 insertions, 25342 deletions
diff --git a/.mailmap b/.mailmap
index 2216b5d5c84e..16b704e1d5d3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -20,6 +20,7 @@ Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
Adrian Bunk <bunk@stusta.de>
+Ajay Kaher <ajay.kaher@broadcom.com> <akaher@vmware.com>
Akhil P Oommen <quic_akhilpo@quicinc.com> <akhilpo@codeaurora.org>
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
@@ -36,6 +37,17 @@ Alexei Avshalom Lazar <quic_ailizaro@quicinc.com> <ailizaro@codeaurora.org>
Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alexey Makhalov <alexey.amakhalov@broadcom.com> <amakhalov@vmware.com>
+Alex Elder <elder@kernel.org>
+Alex Elder <elder@kernel.org> <aelder@sgi.com>
+Alex Elder <elder@kernel.org> <alex.elder@linaro.org>
+Alex Elder <elder@kernel.org> <alex.elder@linary.org>
+Alex Elder <elder@kernel.org> <elder@dreamhost.com>
+Alex Elder <elder@kernel.org> <elder@dreawmhost.com>
+Alex Elder <elder@kernel.org> <elder@ieee.org>
+Alex Elder <elder@kernel.org> <elder@inktank.com>
+Alex Elder <elder@kernel.org> <elder@linaro.org>
+Alex Elder <elder@kernel.org> <elder@newdream.net>
Alex Hung <alexhung@gmail.com> <alex.hung@canonical.com>
Alex Shi <alexs@kernel.org> <alex.shi@intel.com>
Alex Shi <alexs@kernel.org> <alex.shi@linaro.org>
@@ -96,6 +108,8 @@ Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
Ben Widawsky <bwidawsk@kernel.org> <ben.widawsky@intel.com>
Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
Benjamin Poirier <benjamin.poirier@gmail.com> <bpoirier@suse.de>
+Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@gmail.com>
+Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@linaro.org>
Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@sonymobile.com>
@@ -110,6 +124,7 @@ Brendan Higgins <brendan.higgins@linux.dev> <brendanhiggins@google.com>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com>
+Bryan Tan <bryan-bt.tan@broadcom.com> <bryantan@vmware.com>
Cai Huoqing <cai.huoqing@linux.dev> <caihuoqing@baidu.com>
Can Guo <quic_cang@quicinc.com> <cang@codeaurora.org>
Carl Huang <quic_cjhuang@quicinc.com> <cjhuang@codeaurora.org>
@@ -340,7 +355,8 @@ Lee Jones <lee@kernel.org> <joneslee@google.com>
Lee Jones <lee@kernel.org> <lee.jones@canonical.com>
Lee Jones <lee@kernel.org> <lee.jones@linaro.org>
Lee Jones <lee@kernel.org> <lee@ubuntu.com>
-Leonard Crestez <leonard.crestez@nxp.com> Leonard Crestez <cdleonard@gmail.com>
+Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@nxp.com>
+Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@intel.com>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonard Göhrs <l.goehrs@pengutronix.de>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
@@ -442,7 +458,8 @@ Mythri P K <mythripk@ti.com>
Nadav Amit <nadav.amit@gmail.com> <namit@vmware.com>
Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
-Naoya Horiguchi <naoya.horiguchi@nec.com> <n-horiguchi@ah.jp.nec.com>
+Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
+Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Neeraj Upadhyay <quic_neeraju@quicinc.com> <neeraju@codeaurora.org>
Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
@@ -497,7 +514,8 @@ Prasad Sodagudi <quic_psodagud@quicinc.com> <psodagud@codeaurora.org>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
-Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
+Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
+Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
@@ -519,6 +537,7 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Richard Genoud <richard.genoud@bootlin.com> <richard.genoud@gmail.com>
Richard Leitner <richard.leitner@linux.dev> <dev@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
@@ -527,6 +546,7 @@ Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
+Ronak Doshi <ronak.doshi@broadcom.com> <doshir@vmware.com>
Muchun Song <muchun.song@linux.dev> <songmuchun@bytedance.com>
Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
@@ -649,6 +669,7 @@ Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
+Vishnu Dasa <vishnu.dasa@broadcom.com> <vdasa@vmware.com>
Vivek Aknurwar <quic_viveka@quicinc.com> <viveka@codeaurora.org>
Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
diff --git a/CREDITS b/CREDITS
index c55c5a0ee4ff..0107047f807b 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3146,6 +3146,10 @@ S: Triftstra=DFe 55
S: 13353 Berlin
S: Germany
+N: Gustavo Pimentel
+E: gustavo.pimentel@synopsys.com
+D: PCI driver for Synopsys DesignWare
+
N: Emanuel Pirker
E: epirker@edu.uni-klu.ac.at
D: AIC5800 IEEE 1394, RAW I/O on 1394
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
index 023fd82de3f7..d792a56f59ac 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
@@ -10,7 +10,7 @@ Description: RW. Card reactive sustained (PL1) power limit in microwatts.
power limit is disabled, writing 0 disables the
limit. Writing values > 0 and <= TDP will enable the power limit.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
Date: September 2023
@@ -18,53 +18,93 @@ KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
Description: RO. Card default power limit (default TDP setting).
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_crit
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
-Description: RW. Card reactive critical (I1) power limit in microwatts.
+Description: RO. Card energy input of device in microjoules.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
+Date: October 2023
+KernelVersion: 6.6
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Card sustained power limit interval (Tau in PL1/Tau) in
+ milliseconds over which sustained power is averaged.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_max
+Date: February 2024
+KernelVersion: 6.8
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Package reactive sustained (PL1) power limit in microwatts.
+
+ The power controller will throttle the operating frequency
+ if the power averaged over a window (typically seconds)
+ exceeds this limit. A read value of 0 means that the PL1
+ power limit is disabled, writing 0 disables the
+ limit. Writing values > 0 and <= TDP will enable the power limit.
+
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_rated_max
+Date: February 2024
+KernelVersion: 6.8
+Contact: intel-xe@lists.freedesktop.org
+Description: RO. Package default power limit (default TDP setting).
- Card reactive critical (I1) power limit in microwatts is exposed
+ Only supported for particular Intel Xe graphics platforms.
+
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_crit
+Date: February 2024
+KernelVersion: 6.8
+Contact: intel-xe@lists.freedesktop.org
+Description: RW. Package reactive critical (I1) power limit in microwatts.
+
+ Package reactive critical (I1) power limit in microwatts is exposed
for client products. The power controller will throttle the
operating frequency if the power averaged over a window exceeds
this limit.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr1_crit
-Date: September 2023
-KernelVersion: 6.5
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr2_crit
+Date: February 2024
+KernelVersion: 6.8
Contact: intel-xe@lists.freedesktop.org
-Description: RW. Card reactive critical (I1) power limit in milliamperes.
+Description: RW. Package reactive critical (I1) power limit in milliamperes.
- Card reactive critical (I1) power limit in milliamperes is
+ Package reactive critical (I1) power limit in milliamperes is
exposed for server products. The power controller will throttle
the operating frequency if the power averaged over a window
exceeds this limit.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/in0_input
-Date: September 2023
-KernelVersion: 6.5
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy2_input
+Date: February 2024
+KernelVersion: 6.8
Contact: intel-xe@lists.freedesktop.org
-Description: RO. Current Voltage in millivolt.
+Description: RO. Package energy input of device in microjoules.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
-Date: September 2023
-KernelVersion: 6.5
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_max_interval
+Date: February 2024
+KernelVersion: 6.8
Contact: intel-xe@lists.freedesktop.org
-Description: RO. Energy input of device in microjoules.
+Description: RW. Package sustained power limit interval (Tau in PL1/Tau) in
+ milliseconds over which sustained power is averaged.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
-What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
-Date: October 2023
-KernelVersion: 6.6
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/in1_input
+Date: February 2024
+KernelVersion: 6.8
Contact: intel-xe@lists.freedesktop.org
-Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
- milliseconds over which sustained power is averaged.
+Description: RO. Package current voltage in millivolts.
- Only supported for particular Intel xe graphics platforms.
+ Only supported for particular Intel Xe graphics platforms.
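
A minimal usage sketch for the attributes above (hedged: the PCI address and
hwmon index are placeholders that vary per system, and power1_max is assumed
to be the card PL1 attribute name following the hwmon convention this file uses):

    # read the card sustained (PL1) power limit in microwatts
    cat /sys/bus/pci/drivers/xe/<bdf>/hwmon/hwmon<i>/power1_max
    # cap the card at 25 W; writing 0 instead would disable the limit
    echo 25000000 > /sys/bus/pci/drivers/xe/<bdf>/hwmon/hwmon<i>/power1_max
    # read the accumulated package energy in microjoules
    cat /sys/bus/pci/drivers/xe/<bdf>/hwmon/hwmon<i>/energy2_input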
diff --git a/Documentation/ABI/testing/sysfs-driver-panfrost-profiling b/Documentation/ABI/testing/sysfs-driver-panfrost-profiling
new file mode 100644
index 000000000000..7597c420e54b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-panfrost-profiling
@@ -0,0 +1,10 @@
+What: /sys/bus/platform/drivers/panfrost/.../profiling
+Date: February 2024
+KernelVersion: 6.8.0
+Contact: Adrian Larumbe <adrian.larumbe@collabora.com>
+Description:
+ Get/set drm fdinfo's engine and cycles profiling status.
+ Valid values are:
+ 0: Don't enable fdinfo job profiling sources.
+ 1: Enable fdinfo job profiling sources; this enables both the GPU's
+ timestamp and cycle counter registers.
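
A short sketch of exercising the new knob from a shell (the device directory
segment is a placeholder; it depends on the platform device name):

    # read the current fdinfo profiling state: 0 = disabled, 1 = enabled
    cat /sys/bus/platform/drivers/panfrost/<device>/profiling
    # enable the GPU timestamp and cycle counter sources
    echo 1 > /sys/bus/platform/drivers/panfrost/<device>/profiling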
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index cce768afec6b..25a04cda4c2c 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -138,11 +138,10 @@ associated with the source address of the indirect branch. Specifically,
the BHB might be shared across privilege levels even in the presence of
Enhanced IBRS.
-Currently the only known real-world BHB attack vector is via
-unprivileged eBPF. Therefore, it's highly recommended to not enable
-unprivileged eBPF, especially when eIBRS is used (without retpolines).
-For a full mitigation against BHB attacks, it's recommended to use
-retpolines (or eIBRS combined with retpolines).
+Previously the only known real-world BHB attack vector was via unprivileged
+eBPF. Further research has found attacks that don't require unprivileged eBPF.
+For a full mitigation against BHB attacks it is recommended to set BHI_DIS_S or
+use the BHB clearing sequence.
Attack scenarios
----------------
@@ -430,6 +429,23 @@ The possible values in this file are:
'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
=========================== =======================================================
+ - Branch History Injection (BHI) protection status:
+
+.. list-table::
+
+ * - BHI: Not affected
+ - System is not affected
+ * - BHI: Retpoline
+ - System is protected by retpoline
+ * - BHI: BHI_DIS_S
+ - System is protected by BHI_DIS_S
+ * - BHI: SW loop, KVM SW loop
+ - System is protected by software clearing sequence
+ * - BHI: Vulnerable
+ - System is vulnerable to BHI
+ * - BHI: Vulnerable, KVM: SW loop
+ - System is vulnerable; KVM is protected by software clearing sequence
+
Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
report vulnerability.
@@ -484,7 +500,11 @@ Spectre variant 2
Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
boot, by setting the IBRS bit, and they're automatically protected against
- Spectre v2 variant attacks.
+ some Spectre v2 variant attacks. The BHB can still influence the choice of
+ indirect branch predictor entry, and although branch predictor entries are
+ isolated between modes when eIBRS is enabled, the BHB itself is not isolated
+ between modes. Systems which support BHI_DIS_S will set it to protect against
+ BHI attacks.
On Intel's enhanced IBRS systems, this includes cross-thread branch target
injections on SMT systems (STIBP). In other words, Intel eIBRS enables
@@ -638,6 +658,18 @@ kernel command line.
spectre_v2=off. Spectre variant 1 mitigations
cannot be disabled.
+ spectre_bhi=
+
+ [X86] Control mitigation of Branch History Injection
+ (BHI) vulnerability. This setting affects the deployment
+ of the HW BHI control and the SW BHB clearing sequence.
+
+ on
+ (default) Enable the HW or SW mitigation as
+ needed.
+ off
+ Disable the mitigation.
+
For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
Mitigation selection guide
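
The new BHI status strings appear as a suffix in the existing spectre_v2
sysfs file; a quick check on a running system might look like this (the
output shown is illustrative and depends on CPU, microcode, and kernel
configuration):

    $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
    Mitigation: Enhanced / Automatic IBRS; IBPB: conditional; RSB filling; PBRSB-eIBRS: SW sequence; BHI: BHI_DIS_S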
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index bb884c14b2f6..213d0719e2b7 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3423,6 +3423,9 @@
arch-independent options, each of which is an
aggregation of existing arch-specific options.
+ Note, "mitigations" is supported if and only if the
+ kernel was built with CPU_MITIGATIONS=y.
+
off
Disable all optional CPU mitigations. This
improves system performance, but it may also
@@ -3444,6 +3447,7 @@
retbleed=off [X86]
spec_rstack_overflow=off [X86]
spec_store_bypass_disable=off [X86,PPC]
+ spectre_bhi=off [X86]
spectre_v2_user=off [X86]
srbds=off [X86,INTEL]
ssbd=force-off [ARM64]
@@ -6063,6 +6067,15 @@
sonypi.*= [HW] Sony Programmable I/O Control Device driver
See Documentation/admin-guide/laptops/sonypi.rst
+ spectre_bhi= [X86] Control mitigation of Branch History Injection
+ (BHI) vulnerability. This setting affects the
+ deployment of the HW BHI control and the SW BHB
+ clearing sequence.
+
+ on - (default) Enable the HW or SW mitigation
+ as needed.
+ off - Disable the mitigation.
+
spectre_v2= [X86,EARLY] Control mitigation of Spectre variant 2
(indirect branch speculation) vulnerability.
The default operation protects the kernel from
@@ -6599,7 +6612,7 @@
To turn off having tracepoints sent to printk,
echo 0 > /proc/sys/kernel/tracepoint_printk
Note, echoing 1 into this file without the
- tracepoint_printk kernel cmdline option has no effect.
+ tp_printk kernel cmdline option has no effect.
The tp_printk_stop_on_boot (see below) can also be used
to stop the printing of events to console at
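
A hedged example of passing the new spectre_bhi= switch at boot on a
GRUB-based distribution (the file location and update command are
distro-specific assumptions):

    # /etc/default/grub
    GRUB_CMDLINE_LINUX_DEFAULT="quiet splash spectre_bhi=off"
    # regenerate the boot configuration, then reboot
    sudo update-grub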
diff --git a/Documentation/admin-guide/mm/zswap.rst b/Documentation/admin-guide/mm/zswap.rst
index b42132969e31..13632671adae 100644
--- a/Documentation/admin-guide/mm/zswap.rst
+++ b/Documentation/admin-guide/mm/zswap.rst
@@ -155,7 +155,7 @@ Setting this parameter to 100 will disable the hysteresis.
Some users cannot tolerate the swapping that comes with zswap store failures
and zswap writebacks. Swapping can be disabled entirely (without disabling
-zswap itself) on a cgroup-basis as follows:
+zswap itself) on a cgroup-basis as follows::
echo 0 > /sys/fs/cgroup/<cgroup-name>/memory.zswap.writeback
@@ -166,7 +166,7 @@ writeback (because the same pages might be rejected again and again).
When there is a sizable amount of cold memory residing in the zswap pool, it
can be advantageous to proactively write these cold pages to swap and reclaim
the memory for other use cases. By default, the zswap shrinker is disabled.
-User can enable it as follows:
+User can enable it as follows::
echo Y > /sys/module/zswap/parameters/shrinker_enabled
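
Both knobs documented above combine naturally; a minimal sketch, assuming
cgroup v2 is mounted at /sys/fs/cgroup and with 'example.slice' as a
placeholder cgroup name:

    # keep zswap, but stop writeback-induced swapping for one cgroup
    echo 0 > /sys/fs/cgroup/example.slice/memory.zswap.writeback
    # proactively write cold pages in the zswap pool back to swap
    echo Y > /sys/module/zswap/parameters/shrinker_enabled
    # confirm the shrinker state
    cat /sys/module/zswap/parameters/shrinker_enabled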
diff --git a/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst b/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
index d3504826f401..c389d4fd7599 100644
--- a/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
+++ b/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
@@ -29,7 +29,7 @@ The essence of the process (aka 'TL;DR')
========================================
*[If you are new to building or bisecting Linux, ignore this section and head
-over to the* ":ref:`step-by-step guide<introguide_bissbs>`" *below. It utilizes
+over to the* ':ref:`step-by-step guide <introguide_bissbs>`' *below. It utilizes
the same commands as this section while describing them in brief fashion. The
steps are nevertheless easy to follow and together with accompanying entries
in a reference section mention many alternatives, pitfalls, and additional
@@ -38,8 +38,8 @@ aspects, all of which might be essential in your present case.]*
**In case you want to check if a bug is present in code currently supported by
developers**, execute just the *preparations* and *segment 1*; while doing so,
consider the newest Linux kernel you regularly use to be the 'working' kernel.
-In the following example that's assumed to be 6.0.13, which is why the sources
-of 6.0 will be used to prepare the .config file.
+In the following example that's assumed to be 6.0, which is why its sources
+will be used to prepare the .config file.
**In case you face a regression**, follow the steps at least till the end of
*segment 2*. Then you can submit a preliminary report -- or continue with
@@ -61,7 +61,7 @@ will be considered the 'good' release and used to prepare the .config file.
cd ~/linux/
git remote add -t master stable \
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
- git checkout --detach v6.0
+ git switch --detach v6.0
# * Hint: if you used an existing clone, ensure no stale .config is around.
make olddefconfig
# * Ensure the former command picked the .config of the 'working' kernel.
@@ -87,7 +87,7 @@ will be considered the 'good' release and used to prepare the .config file.
a) Checking out latest mainline code::
cd ~/linux/
- git checkout --force --detach mainline/master
+ git switch --discard-changes --detach mainline/master
b) Build, install, and boot a kernel::
@@ -125,7 +125,7 @@ will be considered the 'good' release and used to prepare the .config file.
a) Start by checking out the sources of the 'good' version::
cd ~/linux/
- git checkout --force --detach v6.0
+ git switch --discard-changes --detach v6.0
b) Build, install, and boot a kernel as described earlier in *segment 1,
section b* -- just feel free to skip the 'du' commands, as you have a rough
@@ -136,8 +136,7 @@ will be considered the 'good' release and used to prepare the .config file.
* **Segment 3**: perform and validate the bisection.
- a) In case your 'broken' version is a stable/longterm release, add the Git
- branch holding it::
+ a) Retrieve the sources for your 'bad' version::
git remote set-branches --add stable linux-6.1.y
git fetch stable
@@ -157,11 +156,12 @@ will be considered the 'good' release and used to prepare the .config file.
works with the newly built kernel. If it does, tell Git by executing
``git bisect good``; if it does not, run ``git bisect bad`` instead.
- All three commands will make Git checkout another commit; then re-execute
+ All three commands will make Git check out another commit; then re-execute
this step (e.g. build, install, boot, and test a kernel to then tell Git
the outcome). Do so again and again until Git shows which commit broke
things. If you run short of disk space during this process, check the
- "Supplementary tasks" section below.
+ section 'Complementary tasks: cleanup during and after the process'
+ below.
d) Once you have finished the bisection, put a few things away::
@@ -172,14 +172,17 @@ will be considered the 'good' release and used to prepare the .config file.
e) Try to verify the bisection result::
- git checkout --force --detach mainline/master
+ git switch --discard-changes --detach mainline/master
git revert --no-edit cafec0cacaca0
+ cp ~/kernel-config-working .config
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-cafec0cacaca0-reverted'
This is optional, as some commits are impossible to revert. But if the
second command worked flawlessly, build, install, and boot one more
- kernel, which should not show the regression.
+ kernel; just this time skip the first command copying the base .config file
+ over, as that already has been taken care of.
-* **Supplementary tasks**: cleanup during and after the process.
+* **Complementary tasks**: cleanup during and after the process.
a) To avoid running out of disk space during a bisection, you might need to
remove some kernels you built earlier. You most likely want to keep those
@@ -202,13 +205,25 @@ will be considered the 'good' release and used to prepare the .config file.
the kernels you built earlier and later you might want to keep around for
a week or two.
+* **Optional task**: test a debug patch or a proposed fix later::
+
+ git fetch mainline
+ git switch --discard-changes --detach mainline/master
+ git apply /tmp/foobars-proposed-fix-v1.patch
+ cp ~/kernel-config-working .config
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-foobars-fix-v1'
+
+ Build, install, and boot a kernel as described in *segment 1, section b* --
+ but this time omit the first command copying the build configuration over,
+ as that has been taken care of already.
+
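
Spelled out, the bisection loop of segment 3 reduces to this sketch (assuming
v6.0 as 'good' and 6.1.5 from the linux-6.1.y branch as 'bad', matching the
example versions above):

    git bisect start
    git bisect good v6.0
    git bisect bad v6.1.5
    # build, install, boot, and test the commit Git checked out, then tell
    # Git the verdict with 'git bisect good' or 'git bisect bad'; repeat
    # until Git prints the first bad commit, then clean up:
    git bisect reset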
.. _introguide_bissbs:
Step-by-step guide on how to verify bugs and bisect regressions
===============================================================
This guide describes how to set up your own Linux kernels for investigating bugs
-or regressions you intent to report. How far you want to follow the instructions
+or regressions you intend to report. How far you want to follow the instructions
depends on your issue:
Execute all steps till the end of *segment 1* to **verify if your kernel problem
report; instead of the latter you could also head straight on and follow
*segment 3* to **perform a bisection** for a full-fledged regression report
developers are obliged to act upon.
- :ref:`Preparations: set up everything to build your own kernels.<introprep_bissbs>`
+ :ref:`Preparations: set up everything to build your own kernels <introprep_bissbs>`.
- :ref:`Segment 1: try to reproduce the problem with the latest codebase.<introlatestcheck_bissbs>`
+ :ref:`Segment 1: try to reproduce the problem with the latest codebase <introlatestcheck_bissbs>`.
- :ref:`Segment 2: check if the kernels you build work fine.<introworkingcheck_bissbs>`
+ :ref:`Segment 2: check if the kernels you build work fine <introworkingcheck_bissbs>`.
- :ref:`Segment 3: perform a bisection and validate the result.<introbisect_bissbs>`
+ :ref:`Segment 3: perform a bisection and validate the result <introbisect_bissbs>`.
- :ref:`Supplementary tasks: cleanup during and after following this guide.<introclosure_bissbs>`
+ :ref:`Complementary tasks: cleanup during and after following this guide <introclosure_bissbs>`.
+
+ :ref:`Optional tasks: test reverts, patches, or later versions <introoptional_bissbs>`.
The steps in each segment illustrate the important aspects of the process, while
a comprehensive reference section holds additional details for almost all of the
@@ -240,24 +257,35 @@ to get things rolling again.
For further details on how to report Linux kernel issues or regressions check
out Documentation/admin-guide/reporting-issues.rst, which works in conjunction
with this document. It among others explains why you need to verify bugs with
-the latest 'mainline' kernel, even if you face a problem with a kernel from a
-'stable/longterm' series; for users facing a regression it also explains that
-sending a preliminary report after finishing segment 2 might be wise, as the
-regression and its culprit might be known already. For further details on
-what actually qualifies as a regression check out
-Documentation/admin-guide/reporting-regressions.rst.
+the latest 'mainline' kernel (e.g. versions like 6.0, 6.1-rc1, or 6.1-rc6),
+even if you face a problem with a kernel from a 'stable/longterm' series
+(say 6.0.13).
+
+For users facing a regression, that document also explains why sending a
+preliminary report after segment 2 might be wise, as the regression and its
+culprit might be known already. For further details on what actually qualifies
+as a regression check out Documentation/admin-guide/reporting-regressions.rst.
+
+If you run into any problems while following this guide or have ideas how to
+improve it, :ref:`please let the kernel developers know <submit_improvements>`.
.. _introprep_bissbs:
Preparations: set up everything to build your own kernels
---------------------------------------------------------
+The following steps lay the groundwork for all further tasks.
+
+Note: the instructions assume you are building and testing on the same
+machine; if you want to compile the kernel on another system, check
+:ref:`Build kernels on a different machine <buildhost_bis>` below.
+
.. _backup_bissbs:
* Create a fresh backup and put system repair and restore tools at hand, just
to be prepared for the unlikely case of something going sideways.
- [:ref:`details<backup_bisref>`]
+ [:ref:`details <backup_bisref>`]
.. _vanilla_bissbs:
@@ -265,7 +293,7 @@ Preparations: set up everything to build your own kernels
builds them automatically. That includes but is not limited to DKMS, openZFS,
VirtualBox, and Nvidia's graphics drivers (including the GPLed kernel module).
- [:ref:`details<vanilla_bisref>`]
+ [:ref:`details <vanilla_bisref>`]
.. _secureboot_bissbs:
@@ -276,48 +304,49 @@ Preparations: set up everything to build your own kernels
their restrictions through a process initiated by
``mokutil --disable-validation``.
- [:ref:`details<secureboot_bisref>`]
+ [:ref:`details <secureboot_bisref>`]
.. _rangecheck_bissbs:
* Determine the kernel versions considered 'good' and 'bad' throughout this
- guide.
+ guide:
- Do you follow this guide to verify if a bug is present in the code developers
- care for? Then consider the mainline release your 'working' kernel (the newest
- one you regularly use) is based on to be the 'good' version; if your 'working'
- kernel for example is 6.0.11, then your 'good' kernel is 6.0.
+ * Do you follow this guide to verify if a bug is present in the code the
+ primary developers care for? Then consider the newest kernel you regularly
+ use as the 'good' version (e.g. 6.0, 6.0.13, or 6.1-rc2).
- In case you face a regression, it depends on the version range where the
- regression was introduced:
+ * Do you face a regression, e.g. something broke or works worse after
+ switching to a newer kernel version? In that case it depends on the version
+ range during which the problem appeared:
- * Something which used to work in Linux 6.0 broke when switching to Linux
- 6.1-rc1? Then henceforth regard 6.0 as the last known 'good' version
- and 6.1-rc1 as the first 'bad' one.
+ * Something regressed when updating from a stable/longterm release
+ (say 6.0.13) to a newer mainline series (like 6.1-rc7 or 6.1) or a
+ stable/longterm version based on one (say 6.1.5)? Then consider the
+ mainline release your working kernel is based on to be the 'good'
+ version (e.g. 6.0) and the first version to be broken as the 'bad' one
+ (e.g. 6.1-rc7, 6.1, or 6.1.5). Note, at this point it is merely assumed
+ that 6.0 is fine; this hypothesis will be checked in segment 2.
- * Some function stopped working when updating from 6.0.11 to 6.1.4? Then for
- the time being consider 6.0 as the last 'good' version and 6.1.4 as
- the 'bad' one. Note, at this point it is merely assumed that 6.0 is fine;
- this assumption will be checked in segment 2.
+ * Something regressed when switching from one mainline version (say 6.0) to
+ a later one (like 6.1-rc1) or a stable/longterm release based on it
+ (say 6.1.5)? Then regard the last working version (e.g. 6.0) as 'good' and
+ the first broken (e.g. 6.1-rc1 or 6.1.5) as 'bad'.
- * A feature you used in 6.0.11 does not work at all or worse in 6.1.13? In
- that case you want to bisect within a stable/longterm series: consider
- 6.0.11 as the last known 'good' version and 6.0.13 as the first 'bad'
- one. Note, in this case you still want to compile and test a mainline kernel
- as explained in segment 1: the outcome will determine if you need to report
- your issue to the regular developers or the stable team.
+ * Something regressed when updating within a stable/longterm series (say
+ from 6.0.13 to 6.0.15)? Then consider those versions as 'good' and 'bad'
+ (e.g. 6.0.13 and 6.0.15), as you need to bisect within that series.
*Note, do not confuse 'good' version with 'working' kernel; the latter term
throughout this guide will refer to the last kernel that has been working
fine.*
- [:ref:`details<rangecheck_bisref>`]
+ [:ref:`details <rangecheck_bisref>`]
.. _bootworking_bissbs:
* Boot into the 'working' kernel and briefly use the apparently broken feature.
- [:ref:`details<bootworking_bisref>`]
+ [:ref:`details <bootworking_bisref>`]
.. _diskspace_bissbs:
@@ -327,7 +356,7 @@ Preparations: set up everything to build your own kernels
debug symbols: both explain approaches reducing the amount of space, which
should allow you to master these tasks with about 4 Gigabytes free space.
- [:ref:`details<diskspace_bisref>`]
+ [:ref:`details <diskspace_bisref>`]
.. _buildrequires_bissbs:
@@ -337,7 +366,7 @@ Preparations: set up everything to build your own kernels
reference section shows how to quickly install those on various popular Linux
distributions.
- [:ref:`details<buildrequires_bisref>`]
+ [:ref:`details <buildrequires_bisref>`]
.. _sources_bissbs:
@@ -360,14 +389,23 @@ Preparations: set up everything to build your own kernels
git remote add -t master stable \
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
- [:ref:`details<sources_bisref>`]
+ [:ref:`details <sources_bisref>`]
+
+.. _stablesources_bissbs:
+
+* Is one of the versions you earlier established as 'good' or 'bad' a stable or
+ longterm release (say 6.1.5)? Then download the code for the series it belongs
+ to ('linux-6.1.y' in this example)::
+
+ git remote set-branches --add stable linux-6.1.y
+ git fetch stable
.. _oldconfig_bissbs:
* Start preparing a kernel build configuration (the '.config' file).
Before doing so, ensure you are still running the 'working' kernel an earlier
- step told you to boot; if you are unsure, check the current kernel release
+ step told you to boot; if you are unsure, check the current kernelrelease
identifier using ``uname -r``.
Afterwards check out the source code for the version earlier established as
@@ -375,7 +413,7 @@ Preparations: set up everything to build your own kernels
the version number in this and all later Git commands needs to be prefixed
with a 'v'::
- git checkout --detach v6.0
+ git switch --discard-changes --detach v6.0
Now create a build configuration file::
@@ -398,7 +436,7 @@ Preparations: set up everything to build your own kernels
'make olddefconfig' again and check if it now picked up the right config file
as base.
- [:ref:`details<oldconfig_bisref>`]
+ [:ref:`details <oldconfig_bisref>`]
.. _localmodconfig_bissbs:
@@ -432,7 +470,7 @@ Preparations: set up everything to build your own kernels
spending much effort on, as long as it boots and allows you to properly test the
feature that causes trouble.
- [:ref:`details<localmodconfig_bisref>`]
+ [:ref:`details <localmodconfig_bisref>`]
.. _tagging_bissbs:
@@ -442,7 +480,7 @@ Preparations: set up everything to build your own kernels
./scripts/config --set-str CONFIG_LOCALVERSION '-local'
./scripts/config -e CONFIG_LOCALVERSION_AUTO
- [:ref:`details<tagging_bisref>`]
+ [:ref:`details <tagging_bisref>`]
.. _debugsymbols_bissbs:
@@ -461,7 +499,7 @@ Preparations: set up everything to build your own kernels
./scripts/config -d DEBUG_INFO -d DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT \
-d DEBUG_INFO_DWARF4 -d DEBUG_INFO_DWARF5 -e CONFIG_DEBUG_INFO_NONE
- [:ref:`details<debugsymbols_bisref>`]
+ [:ref:`details <debugsymbols_bisref>`]
.. _configmods_bissbs:
@@ -471,14 +509,14 @@ Preparations: set up everything to build your own kernels
* Are you running Debian? Then you want to avoid known problems by performing
additional adjustments explained in the reference section.
- [:ref:`details<configmods_distros_bisref>`].
+ [:ref:`details <configmods_distros_bisref>`].
* If you want to influence other aspects of the configuration, do so now using
your preferred tool. Note, to use make targets like 'menuconfig' or
'nconfig', you will need to install the development files of ncurses; for
'xconfig' you likewise need the Qt5 or Qt6 headers.
- [:ref:`details<configmods_individual_bisref>`].
+ [:ref:`details <configmods_individual_bisref>`].
.. _saveconfig_bissbs:
@@ -488,7 +526,7 @@ Preparations: set up everything to build your own kernels
make olddefconfig
cp .config ~/kernel-config-working
- [:ref:`details<saveconfig_bisref>`]
+ [:ref:`details <saveconfig_bisref>`]
.. _introlatestcheck_bissbs:
@@ -498,16 +536,30 @@ Segment 1: try to reproduce the problem with the latest codebase
The following steps verify if the problem occurs with the code currently
supported by developers. In case you face a regression, it also checks that the
problem is not caused by some .config change, as reporting the issue then would
-be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
+be a waste of time. [:ref:`details <introlatestcheck_bisref>`]
.. _checkoutmaster_bissbs:
-* Check out the latest Linux codebase::
+* Check out the latest Linux codebase.
- cd ~/linux/
- git checkout --force --detach mainline/master
+ * Are your 'good' and 'bad' versions from the same stable or longterm series?
+ Then check the `front page of kernel.org <https://kernel.org/>`_: if it
+ lists a release from that series without an '[EOL]' tag, check out the series'
+ latest version ('linux-6.1.y' in the following example)::
+
+ cd ~/linux/
+ git switch --discard-changes --detach stable/linux-6.1.y
+
+ Your series is unsupported if it is not listed or carries an 'end of life'
+ tag. In that case you might want to check if a successor series (say
+ linux-6.2.y) or mainline (see next point) fixes the bug.
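+
+ If you prefer the command line, the same information is available in
+ machine-readable form as well. A sketch, assuming 'curl' and 'jq' are
+ installed (the 'iseol' field marks end-of-life series)::
+
+ curl -s https://www.kernel.org/releases.json | \
+ jq -r '.releases[] | "\(.version) EOL:\(.iseol)"'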
- [:ref:`details<checkoutmaster_bisref>`]
+ * In all other cases, run::
+
+ cd ~/linux/
+ git switch --discard-changes --detach mainline/master
+
+ [:ref:`details <checkoutmaster_bisref>`]
.. _build_bissbs:
@@ -522,7 +574,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
reference section for alternatives, which obviously will require other
steps to install as well.
- [:ref:`details<build_bisref>`]
+ [:ref:`details <build_bisref>`]
.. _install_bissbs:
@@ -555,7 +607,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
down: if you will build more kernels as described in segment 2 and 3, you will
have to perform those again after executing ``command -v installkernel [...]``.
- [:ref:`details<install_bisref>`]
+ [:ref:`details <install_bisref>`]
.. _storagespace_bissbs:
@@ -568,7 +620,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
Write down or remember those two values for later: they enable you to prevent
running out of disk space accidentally during a bisection.
- [:ref:`details<storagespace_bisref>`]
+ [:ref:`details <storagespace_bisref>`]
.. _kernelrelease_bissbs:
@@ -595,7 +647,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
If that command does not return '0', check the reference section, as the cause
for this might interfere with your testing.
- [:ref:`details<tainted_bisref>`]
+ [:ref:`details <tainted_bisref>`]
.. _recheckbroken_bissbs:
@@ -603,21 +655,19 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
out the instructions in the reference section to ensure nothing went sideways
during your tests.
- [:ref:`details<recheckbroken_bisref>`]
+ [:ref:`details <recheckbroken_bisref>`]
.. _recheckstablebroken_bissbs:
-* Are you facing a problem within a stable/longterm series, but failed to
- reproduce it with the mainline kernel you just built? One that according to
- the `front page of kernel.org <https://kernel.org/>`_ is still supported? Then
- check if the latest codebase for the particular series might already fix the
- problem. To do so, add the stable series Git branch for your 'good' kernel
- (again, this here is assumed to be 6.0) and check out the latest version::
+* Did you just build a stable or longterm kernel? And were you able to reproduce
+ the regression with it? Then you should test the latest mainline codebase as
+ well, because the result determines which developers the bug must be submitted
+ to.
+
+ To prepare that test, check out current mainline::
cd ~/linux/
- git remote set-branches --add stable linux-6.0.y
- git fetch stable
- git checkout --force --detach linux-6.0.y
+ git switch --discard-changes --detach mainline/master
Now use the checked out code to build and install another kernel using the
commands the earlier steps already described in more detail::
@@ -639,14 +689,16 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
uname -r
cat /proc/sys/kernel/tainted
- Now verify if this kernel is showing the problem.
+ Now verify if this kernel is showing the problem. If it does, then you need
+ to report the bug to the primary developers; if it does not, report it to the
+ stable team. See Documentation/admin-guide/reporting-issues.rst for details.
- [:ref:`details<recheckstablebroken_bisref>`]
+ [:ref:`details <recheckstablebroken_bisref>`]
Do you follow this guide to verify if a problem is present in the code
currently supported by Linux kernel developers? Then you are done at this
point. If you later want to remove the kernel you just built, check out
-:ref:`Supplementary tasks: cleanup during and after following this guide<introclosure_bissbs>`.
+:ref:`Complementary tasks: cleanup during and after following this guide <introclosure_bissbs>`.
In case you face a regression, move on and execute at least the next segment
as well.
@@ -658,7 +710,7 @@ Segment 2: check if the kernels you build work fine
In case of a regression, you now want to ensure the trimmed configuration file
you created earlier works as expected; a bisection with the .config file
-otherwise would be a waste of time. [:ref:`details<introworkingcheck_bisref>`]
+otherwise would be a waste of time. [:ref:`details <introworkingcheck_bisref>`]
.. _recheckworking_bissbs:
@@ -669,7 +721,7 @@ otherwise would be a waste of time. [:ref:`details<introworkingcheck_bisref>`]
'good' (once again assumed to be 6.0 here)::
cd ~/linux/
- git checkout --detach v6.0
+ git switch --discard-changes --detach v6.0
Now use the checked out code to configure, build, and install another kernel
using the commands the previous subsection explained in more detail::
@@ -693,7 +745,7 @@ otherwise would be a waste of time. [:ref:`details<introworkingcheck_bisref>`]
Now check if this kernel works as expected; if not, consult the reference
section for further instructions.
- [:ref:`details<recheckworking_bisref>`]
+ [:ref:`details <recheckworking_bisref>`]
.. _introbisect_bissbs:
@@ -703,18 +755,11 @@ Segment 3: perform the bisection and validate the result
With all the preparations and precaution builds taken care of, you are now ready
to begin the bisection. This will make you build quite a few kernels -- usually
about 15 in case you encountered a regression when updating to a newer series
-(say from 6.0.11 to 6.1.3). But do not worry, due to the trimmed build
+(say from 6.0.13 to 6.1.5). But do not worry, due to the trimmed build
configuration created earlier this works a lot faster than many people assume:
overall on average it will often just take about 10 to 15 minutes to compile
each kernel on commodity x86 machines.
-* In case your 'bad' version is a stable/longterm release (say 6.1.5), add its
- stable branch, unless you already did so earlier::
-
- cd ~/linux/
- git remote set-branches --add stable linux-6.1.y
- git fetch stable
-
.. _bisectstart_bissbs:
* Start the bisection and tell Git about the versions earlier established as
@@ -725,7 +770,7 @@ each kernel on commodity x86 machines.
git bisect good v6.0
git bisect bad v6.1.5
- [:ref:`details<bisectstart_bisref>`]
+ [:ref:`details <bisectstart_bisref>`]
.. _bisectbuild_bissbs:
@@ -745,7 +790,7 @@ each kernel on commodity x86 machines.
If compilation fails for some reason, run ``git bisect skip`` and restart
executing the stack of commands from the beginning.
- In case you skipped the "test latest codebase" step in the guide, check its
+ In case you skipped the 'test latest codebase' step in the guide, check its
description as for why the 'df [...]' and 'make -s kernelrelease [...]'
commands are here.
@@ -754,7 +799,7 @@ each kernel on commodity x86 machines.
totally normal to see release identifiers like '6.0-rc1-local-gcafec0cacaca0'
if you bisect between versions 6.1 and 6.2 for example.
- [:ref:`details<bisectbuild_bisref>`]
+ [:ref:`details <bisectbuild_bisref>`]
.. _bisecttest_bissbs:
@@ -794,7 +839,7 @@ each kernel on commodity x86 machines.
might need to scroll up to see the message mentioning the culprit;
alternatively, run ``git bisect log > ~/bisection-log``.
- [:ref:`details<bisecttest_bisref>`]
+ [:ref:`details <bisecttest_bisref>`]
.. _bisectlog_bissbs:
@@ -806,7 +851,7 @@ each kernel on commodity x86 machines.
cp .config ~/bisection-config-culprit
git bisect reset
- [:ref:`details<bisectlog_bisref>`]
+ [:ref:`details <bisectlog_bisref>`]
.. _revert_bissbs:
@@ -823,16 +868,16 @@ each kernel on commodity x86 machines.
Begin by checking out the latest codebase depending on the range you bisected:
* Did you face a regression within a stable/longterm series (say between
- 6.0.11 and 6.0.13) that does not happen in mainline? Then check out the
+ 6.0.13 and 6.0.15) that does not happen in mainline? Then check out the
latest codebase for the affected series like this::
git fetch stable
- git checkout --force --detach linux-6.0.y
+ git switch --discard-changes --detach linux-6.0.y
* In all other cases check out latest mainline::
git fetch mainline
- git checkout --force --detach mainline/master
+ git switch --discard-changes --detach mainline/master
If you bisected a regression within a stable/longterm series that also
happens in mainline, there is one more thing to do: look up the mainline
@@ -846,27 +891,33 @@ each kernel on commodity x86 machines.
git revert --no-edit cafec0cacaca0
- If that fails, give up trying and move on to the next step. But if it works,
- build a kernel again using the familiar command sequence::
+ If that fails, give up trying and move on to the next step; if it works,
+ adjust the tag to facilitate its identification and prevent accidentally
+ overwriting another kernel::
cp ~/kernel-config-working .config
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-cafec0cacaca0-reverted'
+
+ Build a kernel using the familiar command sequence, just without copying
+ the base .config over::
+
make olddefconfig &&
- make -j $(nproc --all) &&
+ make -j $(nproc --all)
# * Check if the free space suffices holding another kernel:
df -h /boot/ /lib/modules/
sudo make modules_install
command -v installkernel && sudo make install
- Make -s kernelrelease | tee -a ~/kernels-built
+ make -s kernelrelease | tee -a ~/kernels-built
reboot
- Now check one last time if the feature that made you perform a bisection work
- with that kernel.
+ Now check one last time if the feature that made you perform a bisection works
+ with that kernel: if everything went well, it should not show the regression.
- [:ref:`details<revert_bisref>`]
+ [:ref:`details <revert_bisref>`]
.. _introclosure_bissbs:
-Supplementary tasks: cleanup during and after the bisection
+Complementary tasks: cleanup during and after the bisection
-----------------------------------------------------------
During and after following this guide you might want or need to remove some of
@@ -903,7 +954,7 @@ space might run out.
kernel image and related files behind; in that case remove them as described
in the reference section.
- [:ref:`details<makeroom_bisref>`]
+ [:ref:`details <makeroom_bisref>`]
.. _finishingtouch_bissbs:
@@ -926,18 +977,99 @@ space might run out.
the version considered 'good', and the last three or four you compiled
during the actual bisection process.
- [:ref:`details<finishingtouch_bisref>`]
+ [:ref:`details <finishingtouch_bisref>`]
+
+.. _introoptional_bissbs:
+
+Optional: test reverts, patches, or later versions
+--------------------------------------------------
+
+While or after reporting a bug, you might want to test reverts, debug patches,
+proposed fixes, or other versions, or you might be asked to do so. In that
+case follow these instructions.
+
+* Update your Git clone and check out the latest code.
+
+ * In case you want to test mainline, fetch its latest changes before checking
+ its code out::
+
+ git fetch mainline
+ git switch --discard-changes --detach mainline/master
+
+ * In case you want to test a stable or longterm kernel, first add the branch
+ holding the series you are interested in (6.2 in the example), unless you
+ already did so earlier::
+
+ git remote set-branches --add stable linux-6.2.y
+
+ Then fetch the latest changes and check out the latest version from the
+ series::
+
+ git fetch stable
+ git switch --discard-changes --detach stable/linux-6.2.y
+
+* Copy your kernel build configuration over::
+
+ cp ~/kernel-config-working .config
+
+* Your next step depends on what you want to do:
+
+ * In case you just want to test the latest codebase, head to the next step:
+ you are already all set.
+
+ * In case you want to test if a revert fixes an issue, revert one or multiple
+ changes by specifying their commit ids::
+
+ git revert --no-edit cafec0cacaca0
+
+ Now give that kernel a special tag to facilitate its identification and
+ prevent accidentally overwriting another kernel::
+
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-cafec0cacaca0-reverted'
+
+ * In case you want to test a patch, store the patch in a file like
+ '/tmp/foobars-proposed-fix-v1.patch' and apply it like this::
+
+ git apply /tmp/foobars-proposed-fix-v1.patch
+
+ In case of multiple patches, repeat this step with the others.
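+
+ Note, if you received a change as a mail or mbox file, ``git am`` can be
+ used to apply it instead of ``git apply``.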
+
+ Now give that kernel a special tag to facilitate its identification and
+ prevent accidentally overwriting another kernel::
+
+ ./scripts/config --set-str CONFIG_LOCALVERSION '-local-foobars-fix-v1'
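+
+ Note, the tag will show up in the release identifier that the later
+ ``make -s kernelrelease`` call prints; for the example tag above, expect
+ something like '6.2.0-rc1-local-foobars-fix-v1'.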
+
+* Build a kernel using the familiar commands, just without copying the kernel
+ build configuration over, as that has been taken care of already::
+
+ make olddefconfig &&
+ make -j $(nproc --all)
+ # * Check if the free space suffices holding another kernel:
+ df -h /boot/ /lib/modules/
+ sudo make modules_install
+ command -v installkernel && sudo make install
+ make -s kernelrelease | tee -a ~/kernels-built
+ reboot
+
+* Now verify you booted the newly built kernel and check it.
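+
+ One way to do so is to check that the release identifier carries the tag
+ you set earlier and that the kernel did not mark itself as 'tainted'::
+
+ uname -r
+ cat /proc/sys/kernel/tainted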
+
+[:ref:`details <introoptional_bisref>`]
.. _submit_improvements:
-This concludes the step-by-step guide.
+Conclusion
+----------
+
+You have reached the end of the step-by-step guide.
Did you run into trouble following any of the above steps not cleared up by the
reference section below? Did you spot errors? Or do you have ideas how to
-improve the guide? Then please take a moment and let the maintainer of this
+improve the guide?
+
+If any of that applies, please take a moment and let the maintainer of this
document know by email (Thorsten Leemhuis <linux@leemhuis.info>), ideally while
CCing the Linux docs mailing list (linux-doc@vger.kernel.org). Such feedback is
-vital to improve this document further, which is in everybody's interest, as it
+vital to improve this text further, which is in everybody's interest, as it
will enable more people to master the task described here -- and hopefully also
improve similar guides inspired by this one.
@@ -948,10 +1080,20 @@ Reference section for the step-by-step guide
This section holds additional information for almost all the items in the above
step-by-step guide.
+Preparations for building your own kernels
+------------------------------------------
+
+ *The steps in this section lay the groundwork for all further tests.*
+ [:ref:`... <introprep_bissbs>`]
+
+The steps in all later sections of this guide depend on those described here.
+
+[:ref:`back to step-by-step guide <introprep_bissbs>`].
+
.. _backup_bisref:
Prepare for emergencies
------------------------
+~~~~~~~~~~~~~~~~~~~~~~~
*Create a fresh backup and put system repair and restore tools at hand.*
[:ref:`... <backup_bissbs>`]
@@ -966,7 +1108,7 @@ for something going sideways, even if that should not happen.
.. _vanilla_bisref:
Remove anything related to externally maintained kernel modules
----------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Remove all software that depends on externally developed kernel drivers or
builds them automatically.* [:ref:`...<vanilla_bissbs>`]
@@ -984,7 +1126,7 @@ explains in more detail.
.. _secureboot_bisref:
Deal with techniques like Secure Boot
--------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*On platforms with 'Secure Boot' or similar techniques, prepare everything to
ensure the system will permit your self-compiled kernel to boot later.*
@@ -1021,7 +1163,7 @@ Afterwards, permit MokManager to reboot the machine.
.. _bootworking_bisref:
Boot the last kernel that was working
--------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Boot into the last working kernel and briefly recheck if the feature that
regressed really works.* [:ref:`...<bootworking_bissbs>`]
@@ -1034,7 +1176,7 @@ the right thing.
.. _diskspace_bisref:
Space requirements
-------------------
+~~~~~~~~~~~~~~~~~~
*Ensure to have enough free space for building Linux.*
[:ref:`... <diskspace_bissbs>`]
@@ -1052,32 +1194,32 @@ space by quite a few gigabytes.
.. _rangecheck_bisref:
Bisection range
----------------
+~~~~~~~~~~~~~~~
*Determine the kernel versions considered 'good' and 'bad' throughout this
guide.* [:ref:`...<rangecheck_bissbs>`]
Establishing the range of commits to be checked is mostly straightforward,
except when a regression occurred when switching from a release of one stable
-series to a release of a later series (e.g. from 6.0.11 to 6.1.4). In that case
+series to a release of a later series (e.g. from 6.0.13 to 6.1.5). In that case
Git will need some hand holding, as there is no straight line of descent.
That's because with the release of 6.0 mainline carried on to 6.1 while the
stable series 6.0.y branched to the side. It's therefore theoretically possible
-that the issue you face with 6.1.4 only worked in 6.0.11, as it was fixed by a
+that the issue you face with 6.1.5 only worked in 6.0.13, as it was fixed by a
commit that went into one of the 6.0.y releases, but never hit mainline or the
6.1.y series. Thankfully that normally should not happen due to the way the
stable/longterm maintainers maintain the code. It's thus pretty safe to assume
6.0 as a 'good' kernel. That assumption will be tested anyway, as that kernel
will be built and tested in the segment '2' of this guide; Git would force you
-to do this as well, if you tried bisecting between 6.0.11 and 6.1.13.
+to do this as well, if you tried bisecting between 6.0.13 and 6.1.15.
[:ref:`back to step-by-step guide <rangecheck_bissbs>`]
.. _buildrequires_bisref:
Install build requirements
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
*Install all software required to build a Linux kernel.*
[:ref:`...<buildrequires_bissbs>`]
@@ -1117,7 +1259,7 @@ These commands install a few packages that are often, but not always needed. You
for example might want to skip installing the development headers for ncurses,
which you will only need in case you later might want to adjust the kernel build
configuration using make the targets 'menuconfig' or 'nconfig'; likewise omit
-the headers of Qt6 is you do not plan to adjust the .config using 'xconfig'.
+the headers of Qt6 if you do not plan to adjust the .config using 'xconfig'.
You furthermore might need additional libraries and their development headers
for tasks not covered in this guide -- for example when building utilities from
@@ -1128,7 +1270,7 @@ the kernel's tools/ directory.
.. _sources_bisref:
Download the sources using Git
-------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Retrieve the Linux mainline sources.*
[:ref:`...<sources_bissbs>`]
@@ -1148,7 +1290,7 @@ work better for you:
.. _sources_bundle_bisref:
Downloading Linux mainline sources using a bundle
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""""""""""""""""""""""""""""""""""""""""""""""""
Use the following commands to retrieve the Linux mainline sources using a
bundle::
@@ -1184,7 +1326,7 @@ First, execute the following command to retrieve the latest mainline codebase::
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Now deepen your clone's history to the second predecessor of the mainline
-release of your 'good' version. In case the latter are 6.0 or 6.0.11, 5.19 would
+release of your 'good' version. In case the latter is 6.0 or 6.0.13, 5.19 would
be the first predecessor and 5.18 the second -- hence deepen the history up to
that version::
@@ -1219,7 +1361,7 @@ Note, shallow clones have a few peculiar characteristics:
.. _oldconfig_bisref:
Start defining the build configuration for your kernel
-------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Start preparing a kernel build configuration (the '.config' file).*
[:ref:`... <oldconfig_bissbs>`]
@@ -1279,7 +1421,7 @@ that file to the build machine and store it as ~/linux/.config; afterwards run
.. _localmodconfig_bisref:
Trim the build configuration for your kernel
---------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Disable any kernel modules apparently superfluous for your setup.*
[:ref:`... <localmodconfig_bissbs>`]
@@ -1328,7 +1470,7 @@ step-by-step guide mentions::
.. _tagging_bisref:
Tag the kernels about to be build
----------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Ensure all the kernels you will build are clearly identifiable using a
special tag and a unique version identifier.* [:ref:`... <tagging_bissbs>`]
@@ -1344,7 +1486,7 @@ confusing during the bisection.
.. _debugsymbols_bisref:
Decide to enable or disable debug symbols
------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Decide how to handle debug symbols.* [:ref:`... <debugsymbols_bissbs>`]
@@ -1373,7 +1515,7 @@ explains this process in more detail.
.. _configmods_bisref:
Adjust build configuration
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
*Check if you may want or need to adjust some other kernel configuration
options:*
@@ -1384,7 +1526,7 @@ kernel configuration options.
.. _configmods_distros_bisref:
Distro specific adjustments
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""""""""""""""""""""""""""
*Are you running* [:ref:`... <configmods_bissbs>`]
@@ -1409,7 +1551,7 @@ when following this guide on a few commodity distributions.
.. _configmods_individual_bisref:
Individual adjustments
-~~~~~~~~~~~~~~~~~~~~~~
+""""""""""""""""""""""
*If you want to influence the other aspects of the configuration, do so
now.* [:ref:`... <configmods_bissbs>`]
@@ -1426,13 +1568,13 @@ is missing.
.. _saveconfig_bisref:
Put the .config file aside
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
*Reprocess the .config after the latest changes and store it in a safe place.*
[:ref:`... <saveconfig_bissbs>`]
Put the .config you prepared aside, as you want to copy it back to the build
-directory every time during this guide before you start building another
-directory every time during this guide before you start building another
+directory every time during this guide before you start building another
kernel. That's because going back and forth between different versions can alter
.config files in odd ways; those occasionally cause side effects that could
confuse testing or in some cases render the result of your bisection
@@ -1442,8 +1584,8 @@ meaningless.
.. _introlatestcheck_bisref:
-Try to reproduce the regression
------------------------------------------
+Try to reproduce the problem with the latest codebase
+-----------------------------------------------------
*Verify the regression is not caused by some .config change and check if it
still occurs with the latest codebase.* [:ref:`... <introlatestcheck_bissbs>`]
@@ -1490,28 +1632,28 @@ highly recommended for these reasons:
Your report might be ignored if you send it to the wrong party -- and even
when you get a reply there is a decent chance that developers tell you to
- evaluate which of the two cases it is before they take a closer look.
+ evaluate which of the two cases it is before they take a closer look.
[:ref:`back to step-by-step guide <introlatestcheck_bissbs>`]
.. _checkoutmaster_bisref:
Check out the latest Linux codebase
------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Check out the latest Linux codebase.*
- [:ref:`... <introlatestcheck_bissbs>`]
+ [:ref:`... <checkoutmaster_bissbs>`]
In case you later want to recheck if an ever newer codebase might fix the
problem, remember to run that ``git fetch --shallow-exclude [...]`` command
again mentioned earlier to update your local Git repository.
-[:ref:`back to step-by-step guide <introlatestcheck_bissbs>`]
+[:ref:`back to step-by-step guide <checkoutmaster_bissbs>`]
.. _build_bisref:
Build your kernel
------------------
+~~~~~~~~~~~~~~~~~
*Build the image and the modules of your first kernel using the config file
you prepared.* [:ref:`... <build_bissbs>`]
@@ -1521,7 +1663,7 @@ yourself. Another subsection explains how to directly package your kernel up as
deb, rpm or tar file.
Dealing with build errors
-~~~~~~~~~~~~~~~~~~~~~~~~~
+"""""""""""""""""""""""""
When a build error occurs, it might be caused by some aspect of your machine's
setup that often can be fixed quickly; other times though the problem lies in
@@ -1552,11 +1694,11 @@ by modifying your search terms or using another line from the error messages.
In the end, most issues you run into have likely been encountered and
reported by others already. That includes issues where the cause is not your
-system, but lies in the code. If you run into one of those, you might thus find a
-solution (e.g. a patch) or workaround for your issue, too.
+system, but lies in the code. If you run into one of those, you might thus find
+a solution (e.g. a patch) or workaround for your issue, too.
Package your kernel up
-~~~~~~~~~~~~~~~~~~~~~~
+""""""""""""""""""""""
The step-by-step guide uses the default make targets (e.g. 'bzImage' and
'modules' on x86) to build the image and the modules of your kernel, which later
@@ -1587,7 +1729,7 @@ distribution's kernel packages.
.. _install_bisref:
Put the kernel in place
------------------------
+~~~~~~~~~~~~~~~~~~~~~~~
*Install the kernel you just built.* [:ref:`... <install_bissbs>`]
@@ -1630,7 +1772,7 @@ process. Afterwards add your kernel to your bootloader configuration and reboot.
.. _storagespace_bisref:
Storage requirements per kernel
--------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Check how much storage space the kernel, its modules, and other related files
like the initramfs consume.* [:ref:`... <storagespace_bissbs>`]
@@ -1651,7 +1793,7 @@ need to look in different places.
.. _tainted_bisref:
Check if your newly built kernel considers itself 'tainted'
------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Check if the kernel marked itself as 'tainted'.*
[:ref:`... <tainted_bissbs>`]
@@ -1670,7 +1812,7 @@ interest, as your testing might be flawed otherwise.
.. _recheckbroken_bisref:
Check the kernel built from a recent mainline codebase
-------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Verify if your bug occurs with the newly built kernel.*
[:ref:`... <recheckbroken_bissbs>`]
@@ -1696,7 +1838,7 @@ the kernel you built from the latest codebase. These are the most frequent:
.. _recheckstablebroken_bisref:
Check the kernel built from the latest stable/longterm codebase
----------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Are you facing a regression within a stable/longterm release, but failed to
reproduce it with the kernel you just built using the latest mainline sources?
@@ -1741,7 +1883,7 @@ ensure the kernel version you assumed to be 'good' earlier in the process (e.g.
.. _recheckworking_bisref:
Build your own version of the 'good' kernel
--------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Build your own variant of the working kernel and check if the feature that
regressed works as expected with it.* [:ref:`... <recheckworking_bissbs>`]
@@ -1767,15 +1909,25 @@ multitude of reasons why this might happen. Some ideas where to look:
Note, if you found and fixed problems with the .config file, you want to use it
to build another kernel from the latest codebase, as your earlier tests with
-mainline and the latest version from an affected stable/longterm series were most
-likely flawed.
+mainline and the latest version from an affected stable/longterm series were
+most likely flawed.
[:ref:`back to step-by-step guide <recheckworking_bissbs>`]
+Perform a bisection and validate the result
+-------------------------------------------
+
+ *With all the preparations and precaution builds taken care of, you are now
+ ready to begin the bisection.* [:ref:`... <introbisect_bissbs>`]
+
+The steps in this segment perform and validate the bisection.
+
+[:ref:`back to step-by-step guide <introbisect_bissbs>`].
+
.. _bisectstart_bisref:
Start the bisection
--------------------
+~~~~~~~~~~~~~~~~~~~
*Start the bisection and tell Git about the versions earlier established as
'good' and 'bad'.* [:ref:`... <bisectstart_bissbs>`]
@@ -1789,7 +1941,7 @@ for you to test.
.. _bisectbuild_bisref:
Build a kernel from the bisection point
----------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Build, install, and boot a kernel from the code Git checked out using the
same commands you used earlier.* [:ref:`... <bisectbuild_bissbs>`]
@@ -1817,7 +1969,7 @@ There are two things worth of note here:
.. _bisecttest_bisref:
Bisection checkpoint
---------------------
+~~~~~~~~~~~~~~~~~~~~
*Check if the feature that regressed works in the kernel you just built.*
[:ref:`... <bisecttest_bissbs>`]
@@ -1831,7 +1983,7 @@ will be for nothing.
.. _bisectlog_bisref:
Put the bisection log away
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
*Store Git's bisection log and the current .config file in a safe place.*
[:ref:`... <bisectlog_bissbs>`]
@@ -1851,7 +2003,7 @@ ask for it after you report the regression.
.. _revert_bisref:
Try reverting the culprit
--------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~
*Try reverting the culprit on top of the latest codebase to see if this fixes
your regression.* [:ref:`... <revert_bissbs>`]
@@ -1869,14 +2021,20 @@ succeeds, test that kernel version instead.
[:ref:`back to step-by-step guide <revert_bissbs>`]
+Cleanup steps during and after following this guide
+---------------------------------------------------
-Supplementary tasks: cleanup during and after the bisection
------------------------------------------------------------
+ *During and after following this guide you might want or need to remove some
+ of the kernels you installed.* [:ref:`... <introclosure_bissbs>`]
+
+The steps in this section describe clean-up procedures.
+
+[:ref:`back to step-by-step guide <introclosure_bissbs>`].
.. _makeroom_bisref:
Cleaning up during the bisection
---------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*To remove one of the kernels you installed, look up its 'kernelrelease'
identifier.* [:ref:`... <makeroom_bissbs>`]
@@ -1911,13 +2069,13 @@ Now remove the boot entry for the kernel from your bootloader's configuration;
the steps to do that vary quite a bit between Linux distributions.
Note, be careful with wildcards like '*' when deleting files or directories
-for kernels manually: you might accidentally remove files of a 6.0.11 kernel
+for kernels manually: you might accidentally remove files of a 6.0.13 kernel
when all you want is to remove 6.0 or 6.0.1.
[:ref:`back to step-by-step guide <makeroom_bissbs>`]
Cleaning up after the bisection
--------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _finishingtouch_bisref:
@@ -1932,26 +2090,105 @@ build artifacts and the Linux sources, but will leave the Git repository
(~/linux/.git/) behind -- a simple ``git reset --hard`` thus will bring the
sources back.
-Removing the repository as well would likely be unwise at this point: there is a
-decent chance developers will ask you to build another kernel to perform
-additional tests. This is often required to debug an issue or check proposed
-fixes. Before doing so you want to run the ``git fetch mainline`` command again
-followed by ``git checkout mainline/master`` to bring your clone up to date and
-checkout the latest codebase. Then apply the patch using ``git apply
-<filename>`` or ``git am <filename>`` and build yet another kernel using the
-familiar commands.
+Removing the repository as well would likely be unwise at this point: there
+is a decent chance developers will ask you to build another kernel to
+perform additional tests -- like testing a debug patch or a proposed fix.
+Details on how to perform those can be found in the section :ref:`Optional:
+test reverts, patches, or later versions <introoptional_bissbs>`.
Additional tests are also the reason why you want to keep the
~/kernel-config-working file around for a few weeks.
[:ref:`back to step-by-step guide <finishingtouch_bissbs>`]
+.. _introoptional_bisref:
-Additional reading material
-===========================
+Test reverts, patches, or later versions
+----------------------------------------
+
+ *While or after reporting a bug, you might want to test reverts, patches,
+ proposed fixes, or other versions, or you might be asked to do so.*
+ [:ref:`... <introoptional_bissbs>`]
+
+All the commands used in this section should be pretty straightforward, so
+there is not much to add except one thing: when setting a kernel tag as
+instructed, ensure it is not much longer than the one used in the example, as
+problems will arise if the kernelrelease identifier exceeds 63 characters.
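+
+A quick way to check the length upfront, once a tag is set and the .config is
+in place, is to count the characters of the resulting identifier (a simple
+sketch using standard shell tools)::
+
+ make -s kernelrelease | awk '{ print length }'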
+
+[:ref:`back to step-by-step guide <introoptional_bissbs>`].
+
+
+Additional information
+======================
+
+.. _buildhost_bis:
+
+Build kernels on a different machine
+------------------------------------
+
+To compile kernels on another system, slightly alter the step-by-step guide's
+instructions:
+
+* Start following the guide on the machine where you want to install and test
+ the kernels later.
+
+* After executing ':ref:`Boot into the working kernel and briefly use the
+ apparently broken feature <bootworking_bissbs>`', save the list of loaded
+ modules to a file using ``lsmod > ~/test-machine-lsmod``. Then locate the
+ build configuration for the running kernel (see ':ref:`Start defining the
+ build configuration for your kernel <oldconfig_bisref>`' for hints on where
+ to find it) and store it as '~/test-machine-config-working'. Transfer both
+ files to the home directory of your build host.
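+
+ To copy them over the network, something like the following should work (a
+ sketch; replace 'buildhost' with the name of your build machine)::
+
+ scp ~/test-machine-lsmod ~/test-machine-config-working buildhost:~/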
+
+* Continue the guide on the build host (e.g. with ':ref:`Ensure to have enough
+ free space for building [...] <diskspace_bissbs>`').
+
+* When you reach ':ref:`Start preparing a kernel build configuration[...]
+ <oldconfig_bissbs>`': before running ``make olddefconfig`` for the first time,
+ execute the following command to base your configuration on the one from the
+ test machine's 'working' kernel::
+
+ cp ~/test-machine-config-working ~/linux/.config
+
+* During the next step to ':ref:`disable any apparently superfluous kernel
+ modules <localmodconfig_bissbs>`' use the following command instead::
-Further sources
----------------
+ yes '' | make localmodconfig LSMOD=~/test-machine-lsmod
+
+* Continue the guide, but ignore the instructions outlining how to compile,
+ install, and reboot into a kernel every time they come up. Instead build
+ like this::
+
+ cp ~/kernel-config-working .config
+ make olddefconfig &&
+ make -j $(nproc --all) targz-pkg
+
+ This will generate a gzipped tar file whose name is printed in the last
+ line shown; for example, a kernel with the kernelrelease identifier
+ '6.0.0-rc1-local-g928a87efa423' built for x86 machines usually will
+ be stored as '~/linux/linux-6.0.0-rc1-local-g928a87efa423-x86.tar.gz'.
+
+ Copy that file to your test machine's home directory.
+
+* Switch to the test machine to check if you have enough space to hold another
+ kernel. Then extract the file you transferred::
+
+ sudo tar -xvzf ~/linux-6.0.0-rc1-local-g928a87efa423-x86.tar.gz -C /
+
+ Afterwards :ref:`generate the initramfs and add the kernel to your boot
+ loader's configuration <install_bisref>`; on some distributions the following
+ command will take care of both these tasks::
+
+ sudo /sbin/installkernel 6.0.0-rc1-local-g928a87efa423 /boot/vmlinuz-6.0.0-rc1-local-g928a87efa423
+
+ Now reboot and ensure you started the intended kernel.
+
+This approach even works when building for another architecture: just install
+cross-compilers and add the appropriate parameters to every invocation of make
+(e.g. ``make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- [...]``).
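+
+For example, for arm64 the package build might look like this (a sketch,
+assuming an aarch64 cross-compiler is installed and found in your path)::
+
+ cp ~/kernel-config-working .config
+ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- olddefconfig &&
+ make -j $(nproc --all) ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- targz-pkg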
+
+Additional reading material
+---------------------------
* The `man page for 'git bisect' <https://git-scm.com/docs/git-bisect>`_ and
`fighting regressions with 'git bisect' <https://git-scm.com/docs/git-bisect-lk2009.html>`_
diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst
index 3712d81cb50c..6c245582d8fb 100644
--- a/Documentation/arch/x86/resctrl.rst
+++ b/Documentation/arch/x86/resctrl.rst
@@ -574,7 +574,7 @@ Memory b/w domain is L3 cache.
MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;...
Memory bandwidth Allocation specified in MiBps
----------------------------------------------
+----------------------------------------------
Memory bandwidth domain is L3 cache.
::
diff --git a/Documentation/dev-tools/testing-overview.rst b/Documentation/dev-tools/testing-overview.rst
index 0aaf6ea53608..1619e5e5cc9c 100644
--- a/Documentation/dev-tools/testing-overview.rst
+++ b/Documentation/dev-tools/testing-overview.rst
@@ -104,6 +104,8 @@ Some of these tools are listed below:
KASAN and can be used in production. See Documentation/dev-tools/kfence.rst
* lockdep is a locking correctness validator. See
Documentation/locking/lockdep-design.rst
+* Runtime Verification (RV) supports checking specific behaviours for a given
+ subsystem. See Documentation/trace/rv/runtime-verification.rst
* There are several other pieces of debug instrumentation in the kernel, many
of which can be found in lib/Kconfig.debug
diff --git a/Documentation/devicetree/bindings/clock/keystone-gate.txt b/Documentation/devicetree/bindings/clock/keystone-gate.txt
index c5aa187026e3..43f6fb6c9392 100644
--- a/Documentation/devicetree/bindings/clock/keystone-gate.txt
+++ b/Documentation/devicetree/bindings/clock/keystone-gate.txt
@@ -1,5 +1,3 @@
-Status: Unstable - ABI compatibility may be broken in the future
-
Binding for Keystone gate control driver which uses PSC controller IP.
This binding uses the common clock binding[1].
diff --git a/Documentation/devicetree/bindings/clock/keystone-pll.txt b/Documentation/devicetree/bindings/clock/keystone-pll.txt
index 9a3fbc665606..69b0eb7c03c9 100644
--- a/Documentation/devicetree/bindings/clock/keystone-pll.txt
+++ b/Documentation/devicetree/bindings/clock/keystone-pll.txt
@@ -1,5 +1,3 @@
-Status: Unstable - ABI compatibility may be broken in the future
-
Binding for keystone PLLs. The main PLL IP typically has a multiplier,
a divider and a post divider. The additional PLL IPs like ARMPLL, DDRPLL
and PAPLL are controlled by the memory mapped register where as the Main
diff --git a/Documentation/devicetree/bindings/clock/ti/adpll.txt b/Documentation/devicetree/bindings/clock/ti/adpll.txt
index 4c8a2ce2cd70..3122360adcf3 100644
--- a/Documentation/devicetree/bindings/clock/ti/adpll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/adpll.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments ADPLL clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped ADPLL with two to three selectable input clocks
and three to four children.
diff --git a/Documentation/devicetree/bindings/clock/ti/apll.txt b/Documentation/devicetree/bindings/clock/ti/apll.txt
index ade4dd4c30f0..bbd505c1199d 100644
--- a/Documentation/devicetree/bindings/clock/ti/apll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/apll.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments APLL clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped APLL with usually two selectable input clocks
(reference clock and bypass clock), with analog phase locked
diff --git a/Documentation/devicetree/bindings/clock/ti/autoidle.txt b/Documentation/devicetree/bindings/clock/ti/autoidle.txt
index 7c735dde9fe9..05645a10a9e3 100644
--- a/Documentation/devicetree/bindings/clock/ti/autoidle.txt
+++ b/Documentation/devicetree/bindings/clock/ti/autoidle.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments autoidle clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a register mapped
clock which can be put to idle automatically by hardware based on the usage
and a configuration bit setting. Autoidle clock is never an individual
diff --git a/Documentation/devicetree/bindings/clock/ti/clockdomain.txt b/Documentation/devicetree/bindings/clock/ti/clockdomain.txt
index 9c6199249ce5..edf0b5d42768 100644
--- a/Documentation/devicetree/bindings/clock/ti/clockdomain.txt
+++ b/Documentation/devicetree/bindings/clock/ti/clockdomain.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments clockdomain.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1] in consumer role.
Every clock on TI SoC belongs to one clockdomain, but software
only needs this information for specific clocks which require
diff --git a/Documentation/devicetree/bindings/clock/ti/composite.txt b/Documentation/devicetree/bindings/clock/ti/composite.txt
index 33ac7c9ad053..6f7e1331b546 100644
--- a/Documentation/devicetree/bindings/clock/ti/composite.txt
+++ b/Documentation/devicetree/bindings/clock/ti/composite.txt
@@ -1,7 +1,5 @@
Binding for TI composite clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped composite clock with multiple different sub-types;
diff --git a/Documentation/devicetree/bindings/clock/ti/divider.txt b/Documentation/devicetree/bindings/clock/ti/divider.txt
index 9b13b32974f9..4d7c76f0b356 100644
--- a/Documentation/devicetree/bindings/clock/ti/divider.txt
+++ b/Documentation/devicetree/bindings/clock/ti/divider.txt
@@ -1,7 +1,5 @@
Binding for TI divider clock
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped adjustable clock rate divider that does not gate and has
only one input clock or parent. By default the value programmed into
diff --git a/Documentation/devicetree/bindings/clock/ti/dpll.txt b/Documentation/devicetree/bindings/clock/ti/dpll.txt
index 37a7cb6ad07d..14a1b72c2e71 100644
--- a/Documentation/devicetree/bindings/clock/ti/dpll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/dpll.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments DPLL clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped DPLL with usually two selectable input clocks
(reference clock and bypass clock), with digital phase locked
diff --git a/Documentation/devicetree/bindings/clock/ti/fapll.txt b/Documentation/devicetree/bindings/clock/ti/fapll.txt
index c19b3f253b8c..88986ef39ddd 100644
--- a/Documentation/devicetree/bindings/clock/ti/fapll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/fapll.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments FAPLL clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped FAPLL with usually two selectable input clocks
(reference clock and bypass clock), and one or more child
diff --git a/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt b/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt
index 518e3c142276..dc69477b6e98 100644
--- a/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt
+++ b/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt
@@ -1,7 +1,5 @@
Binding for TI fixed factor rate clock sources.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1], and also uses the autoidle
support from TI autoidle clock [2].
diff --git a/Documentation/devicetree/bindings/clock/ti/gate.txt b/Documentation/devicetree/bindings/clock/ti/gate.txt
index 4982615c01b9..a8e0335b006a 100644
--- a/Documentation/devicetree/bindings/clock/ti/gate.txt
+++ b/Documentation/devicetree/bindings/clock/ti/gate.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments gate clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. This clock is
quite much similar to the basic gate-clock [2], however,
it supports a number of additional features. If no register
diff --git a/Documentation/devicetree/bindings/clock/ti/interface.txt b/Documentation/devicetree/bindings/clock/ti/interface.txt
index d3eb5ca92a7f..85fb1f2d2d28 100644
--- a/Documentation/devicetree/bindings/clock/ti/interface.txt
+++ b/Documentation/devicetree/bindings/clock/ti/interface.txt
@@ -1,7 +1,5 @@
Binding for Texas Instruments interface clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. This clock is
quite much similar to the basic gate-clock [2], however,
it supports a number of additional features, including
diff --git a/Documentation/devicetree/bindings/clock/ti/mux.txt b/Documentation/devicetree/bindings/clock/ti/mux.txt
index b33f641f1043..cd56d3c1c09f 100644
--- a/Documentation/devicetree/bindings/clock/ti/mux.txt
+++ b/Documentation/devicetree/bindings/clock/ti/mux.txt
@@ -1,7 +1,5 @@
Binding for TI mux clock.
-Binding status: Unstable - ABI compatibility may be broken in the future
-
This binding uses the common clock binding[1]. It assumes a
register-mapped multiplexer with multiple input clock signals or
parents, one of which can be selected as output. This clock does not
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
index c9a882ee6d98..c4469f463978 100644
--- a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
@@ -9,6 +9,9 @@ title: ITE it6505
maintainers:
- Allen Chen <allen.chen@ite.com.tw>
+allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
+
description: |
The IT6505 is a high-performance DisplayPort 1.1a transmitter,
fully compliant with DisplayPort 1.1a, HDCP 1.3 specifications.
@@ -52,6 +55,9 @@ properties:
maxItems: 1
description: extcon specifier for the Power Delivery
+ "#sound-dai-cells":
+ const: 0
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -105,7 +111,7 @@ required:
- extcon
- ports
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 84aafcbf0919..6ceeed76e88e 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -41,6 +41,7 @@ properties:
- enum:
- ti,ds90cf364a # For the DS90CF364A FPD-Link LVDS Receiver
- ti,ds90cf384a # For the DS90CF384A FPD-Link LVDS Receiver
+ - ti,sn65lvds94 # For the SN65LVDS94 LVDS serdes
- const: lvds-decoder # Generic LVDS decoders compatible fallback
- enum:
- thine,thc63lvdm83d # For the THC63LVDM83D LVDS serializer
diff --git a/Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml b/Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml
new file mode 100644
index 000000000000..862ef441ac9f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/microchip,sam9x75-lvds.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip SAM9X75 LVDS Controller
+
+maintainers:
+ - Dharma Balasubiramani <dharma.b@microchip.com>
+
+description:
+ The Low Voltage Differential Signaling Controller (LVDSC) manages data
+ format conversion from the LCD Controller internal DPI bus to OpenLDI
+ LVDS output signals. LVDSC functions include bit mapping, balanced mode
+ management, and serialization.
+
+properties:
+ compatible:
+ const: microchip,sam9x75-lvds
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Peripheral Bus Clock
+
+ clock-names:
+ items:
+ - const: pclk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/at91.h>
+ lvds-controller@f8060000 {
+ compatible = "microchip,sam9x75-lvds";
+ reg = <0xf8060000 0x100>;
+ interrupts = <56 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 56>;
+ clock-names = "pclk";
+ };
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
index d879c700594a..258dd9cfd770 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
@@ -10,7 +10,7 @@ maintainers:
- Vinay Simha BN <simhavcs@gmail.com>
description: |
- This binding supports DSI to LVDS bridge TC358775
+ This binding supports DSI to LVDS bridges TC358765 and TC358775
MIPI DSI-RX Data 4-lane, CLK 1-lane with data rates up to 800 Mbps/lane.
Video frame size:
@@ -21,7 +21,9 @@ description: |
properties:
compatible:
- const: toshiba,tc358775
+ enum:
+ - toshiba,tc358765
+ - toshiba,tc358775
reg:
maxItems: 1
@@ -46,11 +48,27 @@ properties:
properties:
port@0:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: |
DSI Input. The remote endpoint phandle should be a
reference to a valid mipi_dsi_host device node.
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ description: array of physical DSI data lane indexes.
+ minItems: 1
+ items:
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
+
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: |
@@ -70,10 +88,19 @@ required:
- reg
- vdd-supply
- vddio-supply
- - stby-gpios
- reset-gpios
- ports
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: toshiba,tc358765
+ then:
+ properties:
+ stby-gpios: false
+
additionalProperties: false
examples:
@@ -108,6 +135,7 @@ examples:
reg = <0>;
d2l_in_test: endpoint {
remote-endpoint = <&dsi0_out>;
+ data-lanes = <1 2 3 4>;
};
};
@@ -132,7 +160,6 @@ examples:
reg = <1>;
dsi0_out: endpoint {
remote-endpoint = <&d2l_in_test>;
- data-lanes = <0 1 2 3>;
};
};
};
@@ -167,6 +194,7 @@ examples:
reg = <0>;
d2l_in_dual: endpoint {
remote-endpoint = <&dsi0_out_dual>;
+ data-lanes = <1 2 3 4>;
};
};
@@ -198,7 +226,6 @@ examples:
reg = <1>;
dsi0_out_dual: endpoint {
remote-endpoint = <&d2l_in_dual>;
- data-lanes = <0 1 2 3>;
};
};
};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml
index c6641acd75d6..b8b8e83ebc3f 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml
@@ -24,6 +24,7 @@ properties:
- enum:
- mediatek,mt8173-disp-gamma
- mediatek,mt8183-disp-gamma
+ - mediatek,mt8195-disp-gamma
- items:
- enum:
- mediatek,mt6795-disp-gamma
@@ -35,6 +36,10 @@ properties:
- mediatek,mt8192-disp-gamma
- mediatek,mt8195-disp-gamma
- const: mediatek,mt8183-disp-gamma
+ - items:
+ - enum:
+ - mediatek,mt8188-disp-gamma
+ - const: mediatek,mt8195-disp-gamma
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
index b1e624be3e33..a015dce72f60 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
@@ -19,6 +19,7 @@ properties:
- ampire,am8001280g
- bananapi,lhr050h41
- feixin,k101-im2byl02
+ - startek,kd050hdfia020
- tdo,tl050hdv35
- wanchanglong,w552946aba
- const: ilitek,ili9881c
diff --git a/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml b/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
new file mode 100644
index 000000000000..1e08648f5bc7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/lg,sw43408.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LG SW43408 1080x2160 DSI panel
+
+maintainers:
+ - Caleb Connolly <caleb.connolly@linaro.org>
+
+description:
+ This panel is used on the Pixel 3; it is a 60 Hz OLED panel which
+ requires DSC (Display Stream Compression) and has rounded corners.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: lg,sw43408
+
+ reg: true
+ port: true
+ vddi-supply: true
+ vpnl-supply: true
+ reset-gpios: true
+
+required:
+ - compatible
+ - vddi-supply
+ - vpnl-supply
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "lg,sw43408";
+ reg = <0>;
+
+ vddi-supply = <&vreg_l14a_1p88>;
+ vpnl-supply = <&vreg_l28a_3p0>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ port {
+ endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml
index 377a05d48a02..7cac93b20944 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml
@@ -19,7 +19,7 @@ description: |
either bilinear interpolation or pixel duplication.
allOf:
- - $ref: panel-common.yaml#
+ - $ref: panel-common-dual.yaml#
properties:
compatible:
@@ -59,6 +59,7 @@ required:
- avee-supply
- dvdd-supply
- vddio-supply
+ - ports
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
index 5f7e4c486094..bbeea8cfa5fb 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
@@ -14,9 +14,6 @@ description: |
panels. Support video mode panels from China Star Optoelectronics
Technology (CSOT) and BOE Technology.
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
oneOf:
@@ -38,7 +35,6 @@ properties:
description: regulator that supplies the I/O voltage
reg: true
- ports: true
rotation: true
backlight: true
@@ -47,7 +43,26 @@ required:
- reg
- vddio-supply
- reset-gpios
- - ports
+
+allOf:
+ - $ref: panel-common-dual.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - novatek,nt36523w
+ then:
+ properties:
+ ports:
+ properties:
+ port@1: false
+ else:
+ properties:
+ port: false
+ ports:
+ required:
+ - port@1
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common-dual.yaml b/Documentation/devicetree/bindings/display/panel/panel-common-dual.yaml
new file mode 100644
index 000000000000..cc7ea3c35c77
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-common-dual.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-common-dual.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common Properties for Dual-Link Display Panels
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description:
+ Properties common for panel ICs supporting dual link panels. Devices might
+ also support single link.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ additionalProperties: false
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: First link
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Second link
+
+ "#address-cells": true
+ "#size-cells": true
+
+ required:
+ - port@0
+
+# Single-panel setups are still allowed.
+oneOf:
+ - required:
+ - ports
+ - required:
+ - port
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index f9160d7bac3c..db5acd2807ed 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -36,6 +36,8 @@ properties:
- jdi,fhd-r63452
# Khadas TS050 5" 1080x1920 LCD panel
- khadas,ts050
+ # Khadas TS050 V2 5" 1080x1920 LCD panel
+ - khadas,ts050v2
# Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
- kingdisplay,kd097d04
# LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
@@ -50,6 +52,8 @@ properties:
- panasonic,vvx10f004b00
# Panasonic 10" WUXGA TFT LCD panel
- panasonic,vvx10f034n00
+ # Samsung s6e3fa7 1080x2220 based AMS559NK06 AMOLED panel
+ - samsung,s6e3fa7-ams559nk06
# Samsung s6e3fc2x01 1080x2340 AMOLED panel
- samsung,s6e3fc2x01
# Samsung sofef00 1080x2280 AMOLED panel
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index a95445f40870..931d98836e12 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -91,6 +91,8 @@ properties:
- boe,nv133fhm-n62
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
- boe,nv140fhmn49
+ # Crystal Clear Technology CMT430B19N00 4.3" 480x272 TFT-LCD panel
+ - cct,cmt430b19n00
# CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
- cdtech,s043wq26h-ct7
# CDTech(H.K.) Electronics Limited 7" WSVGA (1024x600) TFT LCD Panel
@@ -188,6 +190,8 @@ properties:
- innolux,g121i1-l01
# Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
- innolux,g121x1-l03
+ # Innolux Corporation 12.1" G121XCE-L01 XGA (1024x768) TFT LCD panel
+ - innolux,g121xce-l01
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bca-ea1
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
@@ -272,6 +276,8 @@ properties:
- osddisplays,osd070t1718-19ts
# One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
- osddisplays,osd101t2045-53ts
+ # POWERTIP PH128800T006-ZHC01 10.1" WXGA TFT LCD panel
+ - powertip,ph128800t006-zhc01
# POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel
- powertip,ph800480t013-idf02
# QiaoDian XianShi Corporation 4"3 TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm69380.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm69380.yaml
new file mode 100644
index 000000000000..b17765b2b351
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm69380.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/raydium,rm69380.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Raydium RM69380-based DSI display panels
+
+maintainers:
+ - David Wronek <david@mainlining.org>
+
+description:
+ The Raydium RM69380 is a generic DSI panel IC used to control
+ OLED panels.
+
+allOf:
+ - $ref: panel-common-dual.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - lenovo,j716f-edo-rm69380
+ - const: raydium,rm69380
+ description: This indicates the specific panel (and its manufacturer)
+ driven by the RM69380 controller. The compatible string determines how
+ the RM69380 panel driver shall be configured to work with the indicated
+ panel. The raydium,rm69380 compatible shall always be provided as a
+ fallback.
+
+ avdd-supply:
+ description: Analog voltage rail
+
+ vddio-supply:
+ description: I/O voltage rail
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO used for the reset line. This should be active low.
+
+ reg: true
+
+required:
+ - compatible
+ - reg
+ - avdd-supply
+ - vddio-supply
+ - reset-gpios
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "lenovo,j716f-edo-rm69380", "raydium,rm69380";
+ reg = <0>;
+
+ avdd-supply = <&panel_avdd_regulator>;
+ vddio-supply = <&vreg_l14a>;
+ reset-gpios = <&tlmm 75 GPIO_ACTIVE_LOW>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ panel_in_0: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ panel_in_1: endpoint {
+ remote-endpoint = <&mdss_dsi1_out>;
+ };
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml b/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
index 6ec471284f97..4ae152cc55e0 100644
--- a/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
+++ b/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
@@ -22,6 +22,8 @@ properties:
enum:
# Anbernic RG353V-V2 5.0" 640x480 TFT LCD panel
- anbernic,rg353v-panel-v2
+ # GameForce Chi 3.5" 640x480 TFT LCD panel
+ - gameforce,chi-panel
# Powkiddy RGB10MAX3 5.0" 720x1280 TFT LCD panel
- powkiddy,rgb10max3-panel
# Powkiddy RGB30 3.0" 720x720 TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml b/Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml
index b6b885b4c22d..07bce556ad40 100644
--- a/Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sony,td4353-jdi.yaml
@@ -23,6 +23,8 @@ properties:
reg: true
backlight: true
+ width-mm: true
+ height-mm: true
vddio-supply:
description: VDDIO 1.8V supply
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml
index af638b6c0d21..2aac62219ff6 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml
@@ -15,6 +15,7 @@ description: |
allOf:
- $ref: ../bridge/synopsys,dw-hdmi.yaml#
+ - $ref: /schemas/sound/dai-common.yaml#
properties:
compatible:
@@ -124,6 +125,9 @@ properties:
description:
phandle to the GRF to mux vopl/vopb.
+ "#sound-dai-cells":
+ const: 0
+
required:
- compatible
- reg
@@ -153,6 +157,7 @@ examples:
ddc-i2c-bus = <&i2c5>;
power-domains = <&power RK3288_PD_VIO>;
rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
index be78dcfa1c76..5b87b0f1963e 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
@@ -37,6 +37,9 @@ properties:
power-domains:
maxItems: 1
+ "#sound-dai-cells":
+ const: 0
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -66,6 +69,7 @@ required:
- ports
allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
- if:
properties:
compatible:
@@ -106,6 +110,7 @@ examples:
clock-names = "pclk";
pinctrl-names = "default";
pinctrl-0 = <&hdmi_ctl>;
+ #sound-dai-cells = <0>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
index 1a68a940d165..6d4b78a36576 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
@@ -10,6 +10,9 @@ maintainers:
- Sandy Huang <hjc@rock-chips.com>
- Heiko Stuebner <heiko@sntech.de>
+allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
+
properties:
compatible:
const: rockchip,rk3066-hdmi
@@ -34,6 +37,9 @@ properties:
description:
This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1.
+ "#sound-dai-cells":
+ const: 0
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -83,6 +89,7 @@ examples:
pinctrl-names = "default";
power-domains = <&power RK3066_PD_VIO>;
rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/dts-coding-style.rst b/Documentation/devicetree/bindings/dts-coding-style.rst
index a9bdd2b59dca..8a68331075a0 100644
--- a/Documentation/devicetree/bindings/dts-coding-style.rst
+++ b/Documentation/devicetree/bindings/dts-coding-style.rst
@@ -144,6 +144,8 @@ Example::
#dma-cells = <1>;
clocks = <&clock_controller 0>, <&clock_controller 1>;
clock-names = "bus", "host";
+ #address-cells = <1>;
+ #size-cells = <1>;
vendor,custom-property = <2>;
status = "disabled";
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index 1812ef31d5f1..3c36cd0510de 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -69,14 +69,10 @@ properties:
- items:
pattern: c32$
- items:
- pattern: c32d-wl$
- - items:
pattern: cs32$
- items:
pattern: c64$
- items:
- pattern: c64d-wl$
- - items:
pattern: cs64$
- items:
pattern: c128$
@@ -136,6 +132,7 @@ properties:
- renesas,r1ex24128
- samsung,s524ad0xd1
- const: atmel,24c128
+ - pattern: '^atmel,24c(32|64)d-wl$' # Actual vendor is st
label:
description: Descriptive name of the EEPROM.
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
new file mode 100644
index 000000000000..a5b4e0021758
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
@@ -0,0 +1,147 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpu/arm,mali-valhall-csf.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Mali Valhall GPU
+
+maintainers:
+ - Liviu Dudau <liviu.dudau@arm.com>
+ - Boris Brezillon <boris.brezillon@collabora.com>
+
+properties:
+ $nodename:
+ pattern: '^gpu@[a-f0-9]+$'
+
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - rockchip,rk3588-mali
+ - const: arm,mali-valhall-csf # Mali Valhall GPU model/revision is fully discoverable
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Job interrupt
+ - description: MMU interrupt
+ - description: GPU interrupt
+
+ interrupt-names:
+ items:
+ - const: job
+ - const: mmu
+ - const: gpu
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: core
+ - const: coregroup
+ - const: stacks
+
+ mali-supply: true
+
+ operating-points-v2: true
+ opp-table:
+ type: object
+
+ power-domains:
+ minItems: 1
+ maxItems: 5
+
+ power-domain-names:
+ minItems: 1
+ maxItems: 5
+
+ sram-supply: true
+
+ "#cooling-cells":
+ const: 2
+
+ dynamic-power-coefficient:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ A u32 value that represents the running time dynamic
+ power coefficient in units of uW/MHz/V^2. The
+ coefficient can either be calculated from power
+ measurements or derived by analysis.
+
+ The dynamic power consumption of the GPU is
+ proportional to the square of the Voltage (V) and
+ the clock frequency (f). The coefficient is used to
+ calculate the dynamic power as below -
+
+ Pdyn = dynamic-power-coefficient * V^2 * f
+
+ where voltage is in V, frequency is in MHz.
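+
+ As an illustrative worked example, a coefficient of 1000 uW/MHz/V^2
+ at 0.675 V and 300 MHz gives Pdyn = 1000 * 0.675^2 * 300 = 136687.5 uW,
+ i.e. roughly 137 mW.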
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - mali-supply
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3588-mali
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ power-domains:
+ maxItems: 1
+ power-domain-names: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rockchip,rk3588-cru.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/rk3588-power.h>
+
+ gpu: gpu@fb000000 {
+ compatible = "rockchip,rk3588-mali", "arm,mali-valhall-csf";
+ reg = <0xfb000000 0x200000>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "job", "mmu", "gpu";
+ clock-names = "core", "coregroup", "stacks";
+ clocks = <&cru CLK_GPU>, <&cru CLK_GPU_COREGROUP>,
+ <&cru CLK_GPU_STACKS>;
+ power-domains = <&power RK3588_PD_GPU>;
+ operating-points-v2 = <&gpu_opp_table>;
+ mali-supply = <&vdd_gpu_s0>;
+ sram-supply = <&vdd_gpu_mem_s0>;
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
index 528ef3572b62..055a3351880b 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
@@ -94,6 +94,10 @@ properties:
local-bd-address: true
+ qcom,local-bd-address-broken:
+ type: boolean
+ description:
+ Set when the boot firmware incorrectly passes the address in big-endian order
required:
- compatible
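
In a device tree, the new flag is just a boolean alongside `local-bd-address`;
a hedged sketch, in which the serial node and device compatible are
illustrative::

    &uart6 {
        bluetooth {
            compatible = "qcom,wcn3990-bt";
            local-bd-address = [ 00 00 00 00 00 00 ];
            qcom,local-bd-address-broken;
        };
    };
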
diff --git a/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml b/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
index afcdeed4e88a..bc813fe74fab 100644
--- a/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
+++ b/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
@@ -52,6 +52,9 @@ properties:
- const: main
- const: mm
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
index 25f8658e216f..48a49c516b62 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
+++ b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
@@ -1,9 +1,6 @@
TI Davinci DSP devices
=======================
-Binding status: Unstable - Subject to changes for DT representation of clocks
- and resets
-
The TI Davinci family of SoCs usually contains a TI DSP Core sub-system that
is used to offload some of the processor-intensive tasks or algorithms, for
achieving various system level goals.
diff --git a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
index 65cb2e5c5eee..eb2992a447d7 100644
--- a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
+++ b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Atmel Universal Synchronous Asynchronous Receiver/Transmitter (USART)
maintainers:
- - Richard Genoud <richard.genoud@gmail.com>
+ - Richard Genoud <richard.genoud@bootlin.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml
index 397f75909b20..ce1a6505eb51 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml
@@ -51,7 +51,7 @@ properties:
ranges: true
patternProperties:
- "^clock-controller@[0-9a-z]+$":
+ "^clock-controller@[0-9a-f]+$":
$ref: /schemas/clock/fsl,flexspi-clock.yaml#
required:
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
index 8d088b5fe823..a6a511b00a12 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
@@ -41,7 +41,7 @@ properties:
ranges: true
patternProperties:
- "^interrupt-controller@[a-z0-9]+$":
+ "^interrupt-controller@[a-f0-9]+$":
$ref: /schemas/interrupt-controller/fsl,ls-extirq.yaml#
required:
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 0b87c266760c..79798c747476 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -171,6 +171,7 @@ allOf:
unevaluatedProperties: false
pcie-phy:
+ type: object
description:
Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
index 7a4a6ab85970..ab8f28993139 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
@@ -60,7 +60,7 @@ properties:
be implemented in an always-on power domain."
patternProperties:
- '^frame@[0-9a-z]*$':
+ '^frame@[0-9a-f]+$':
type: object
additionalProperties: false
description: A timer node has up to 8 frame sub-nodes, each with the following properties.
diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
index 10c146424baa..cd3680dc002f 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
@@ -27,10 +27,13 @@ properties:
- qcom,msm8996-ufshc
- qcom,msm8998-ufshc
- qcom,sa8775p-ufshc
+ - qcom,sc7180-ufshc
- qcom,sc7280-ufshc
+ - qcom,sc8180x-ufshc
- qcom,sc8280xp-ufshc
- qcom,sdm845-ufshc
- qcom,sm6115-ufshc
+ - qcom,sm6125-ufshc
- qcom,sm6350-ufshc
- qcom,sm8150-ufshc
- qcom,sm8250-ufshc
@@ -42,11 +45,11 @@ properties:
- const: jedec,ufs-2.0
clocks:
- minItems: 8
+ minItems: 7
maxItems: 11
clock-names:
- minItems: 8
+ minItems: 7
maxItems: 11
dma-coherent: true
@@ -117,9 +120,35 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sc7180-ufshc
+ then:
+ properties:
+ clocks:
+ minItems: 7
+ maxItems: 7
+ clock-names:
+ items:
+ - const: core_clk
+ - const: bus_aggr_clk
+ - const: iface_clk
+ - const: core_clk_unipro
+ - const: ref_clk
+ - const: tx_lane0_sync_clk
+ - const: rx_lane0_sync_clk
+ reg:
+ maxItems: 1
+ reg-names:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,msm8998-ufshc
- qcom,sa8775p-ufshc
- qcom,sc7280-ufshc
+ - qcom,sc8180x-ufshc
- qcom,sc8280xp-ufshc
- qcom,sm8250-ufshc
- qcom,sm8350-ufshc
@@ -215,6 +244,7 @@ allOf:
contains:
enum:
- qcom,sm6115-ufshc
+ - qcom,sm6125-ufshc
then:
properties:
clocks:
@@ -248,7 +278,7 @@ allOf:
reg:
maxItems: 1
clocks:
- minItems: 8
+ minItems: 7
maxItems: 8
else:
properties:
@@ -256,7 +286,7 @@ allOf:
minItems: 1
maxItems: 2
clocks:
- minItems: 8
+ minItems: 7
maxItems: 11
unevaluatedProperties: false
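
Pulling the sc7180 branch together, a node honouring the seven-clock layout
might look like this sketch; the register address and clock specifiers are
illustrative, not taken from this patch::

    ufs@1d84000 {
        compatible = "qcom,sc7180-ufshc", "qcom,ufshc", "jedec,ufs-2.0";
        reg = <0x01d84000 0x3000>;    /* single reg entry, per the if/then above */
        clocks = <&gcc 0>, <&gcc 1>, <&gcc 2>, <&gcc 3>,
                 <&rpmhcc 4>, <&gcc 5>, <&gcc 6>;
        clock-names = "core_clk", "bus_aggr_clk", "iface_clk",
                      "core_clk_unipro", "ref_clk",
                      "tx_lane0_sync_clk", "rx_lane0_sync_clk";
    };
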
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index b97d298b3eb6..9cab8385108c 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -256,6 +256,8 @@ patternProperties:
description: Catalyst Semiconductor, Inc.
"^cavium,.*":
description: Cavium, Inc.
+ "^cct,.*":
+ description: Crystal Clear Technology Sdn. Bhd.
"^cdns,.*":
description: Cadence Design Systems Inc.
"^cdtech,.*":
@@ -529,6 +531,8 @@ patternProperties:
description: FX Technology Ltd.
"^galaxycore,.*":
description: GalaxyCore Inc.
+ "^gameforce,.*":
+ description: GameForce
"^gardena,.*":
description: GARDENA GmbH
"^gateway,.*":
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index 0c153d79ccc4..29abf1eebf9f 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -77,7 +77,7 @@ consider though:
the usual size discover pattern size = SEEK_END(0); SEEK_SET(0). Every other
llseek operation will report -EINVAL.
- If llseek on dma-buf FDs isn't support the kernel will report -ESPIPE for all
+ If llseek on dma-buf FDs isn't supported the kernel will report -ESPIPE for all
cases. Userspace can use this to detect support for discovering the dma-buf
size using llseek.
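
A user-space sketch of that discovery pattern, with error handling trimmed and
`fd` assumed to be a dma-buf file descriptor obtained elsewhere::

    #include <unistd.h>

    static off_t dmabuf_size(int fd)
    {
        off_t size = lseek(fd, 0, SEEK_END);

        if (size < 0)    /* -ESPIPE: no llseek support on this kernel */
            return -1;

        lseek(fd, 0, SEEK_SET);    /* rewind: SEEK_END(0); SEEK_SET(0) */
        return size;
    }
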
diff --git a/Documentation/driver-api/virtio/writing_virtio_drivers.rst b/Documentation/driver-api/virtio/writing_virtio_drivers.rst
index e14c58796d25..e5de6f5d061a 100644
--- a/Documentation/driver-api/virtio/writing_virtio_drivers.rst
+++ b/Documentation/driver-api/virtio/writing_virtio_drivers.rst
@@ -97,7 +97,6 @@ like this::
static struct virtio_driver virtio_dummy_driver = {
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_dummy_probe,
.remove = virtio_dummy_remove,
diff --git a/Documentation/filesystems/bcachefs/index.rst b/Documentation/filesystems/bcachefs/index.rst
new file mode 100644
index 000000000000..e2bd61ccd96f
--- /dev/null
+++ b/Documentation/filesystems/bcachefs/index.rst
@@ -0,0 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+bcachefs Documentation
+======================
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ errorcodes
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 0ea1e44fa028..1f9b4c905a6a 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -69,6 +69,7 @@ Documentation for filesystem implementations.
afs
autofs
autofs-mount-control
+ bcachefs/index
befs
bfs
btrfs
diff --git a/Documentation/gpu/amdgpu/debugging.rst b/Documentation/gpu/amdgpu/debugging.rst
new file mode 100644
index 000000000000..e75f97d0e4ea
--- /dev/null
+++ b/Documentation/gpu/amdgpu/debugging.rst
@@ -0,0 +1,80 @@
+===============
+ GPU Debugging
+===============
+
+GPUVM Debugging
+===============
+
+To aid in debugging GPU virtual memory related problems, the driver supports a
+number of module parameters:
+
+`vm_fault_stop` - If non-0, halt the GPU memory controller on a GPU page fault.
+
+`vm_update_mode` - If non-0, use the CPU to update GPU page tables rather than
+the GPU.
+
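+A sketch of exercising both parameters at module load time (the values shown
+are illustrative)::
+
+  modprobe amdgpu vm_fault_stop=1 vm_update_mode=3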
+
+Decoding a GPUVM Page Fault
+===========================
+
+If you see a GPU page fault in the kernel log, you can decode it to figure
+out what is going wrong in your application. A page fault in your kernel
+log may look something like this:
+
+::
+
+ [gfxhub0] no-retry page fault (src_id:0 ring:24 vmid:3 pasid:32777, for process glxinfo pid 2424 thread glxinfo:cs0 pid 2425)
+ in page starting at address 0x0000800102800000 from IH client 0x1b (UTCL2)
+ VM_L2_PROTECTION_FAULT_STATUS:0x00301030
+ Faulty UTCL2 client ID: TCP (0x8)
+ MORE_FAULTS: 0x0
+ WALKER_ERROR: 0x0
+ PERMISSION_FAULTS: 0x3
+ MAPPING_ERROR: 0x0
+ RW: 0x0
+
+First you have the memory hub, gfxhub and mmhub. gfxhub is the memory
+hub used for graphics, compute, and sdma on some chips. mmhub is the
+memory hub used for multi-media and sdma on some chips.
+
+Next you have the vmid and pasid. If the vmid is 0, this fault was likely
+caused by the kernel driver or firmware. If the vmid is non-0, it is generally
+a fault in a user application. The pasid is used to link a vmid to a system
+process id. If the process is active when the fault happens, the process
+information will be printed.
+
+The GPU virtual address that caused the fault comes next.
+
+The client ID indicates the GPU block that caused the fault.
+Some common client IDs:
+
+- CB/DB: The color/depth backend of the graphics pipe
+- CPF: Command Processor Frontend
+- CPC: Command Processor Compute
+- CPG: Command Processor Graphics
+- TCP/SQC/SQG: Shaders
+- SDMA: SDMA engines
+- VCN: Video encode/decode engines
+- JPEG: JPEG engines
+
+PERMISSION_FAULTS describe what faults were encountered:
+
+- bit 0: the PTE was not valid
+- bit 1: the PTE read bit was not set
+- bit 2: the PTE write bit was not set
+- bit 3: the PTE execute bit was not set
+
+Finally, RW indicates whether the access was a read (0) or a write (1).
+
+In the example above, a shader (client id = TCP) generated a read (RW = 0x0) to
+an invalid page (PERMISSION_FAULTS = 0x3) at GPU virtual address
+0x0000800102800000. The user can then inspect their shader code and resource
+descriptor state to determine what caused the GPU page fault.
+
+UMR
+===
+
+`umr <https://gitlab.freedesktop.org/tomstdenis/umr>`_ is a general purpose
+GPU debugging and diagnostics tool. Please see the umr
+`documentation <https://umr.readthedocs.io/en/main/>`_ for more information
+about its capabilities.
diff --git a/Documentation/gpu/amdgpu/display/display-contributing.rst b/Documentation/gpu/amdgpu/display/display-contributing.rst
index fdb2bea01d53..36f3077eee00 100644
--- a/Documentation/gpu/amdgpu/display/display-contributing.rst
+++ b/Documentation/gpu/amdgpu/display/display-contributing.rst
@@ -135,7 +135,7 @@ Enable underlay
---------------
AMD display has this feature called underlay (which you can read more about at
-'Documentation/GPU/amdgpu/display/mpo-overview.rst') which is intended to
+'Documentation/gpu/amdgpu/display/mpo-overview.rst') which is intended to
save power when playing a video. The basic idea is to put a video in the
underlay plane at the bottom and the desktop in the plane above it with a hole
in the video area. This feature is enabled in ChromeOS, and from our data
diff --git a/Documentation/gpu/amdgpu/index.rst b/Documentation/gpu/amdgpu/index.rst
index 912e699fd373..847e04924030 100644
--- a/Documentation/gpu/amdgpu/index.rst
+++ b/Documentation/gpu/amdgpu/index.rst
@@ -15,4 +15,5 @@ Next (GCN), Radeon DNA (RDNA), and Compute DNA (CDNA) architectures.
ras
thermal
driver-misc
+ debugging
amdgpu-glossary
diff --git a/Documentation/gpu/driver-uapi.rst b/Documentation/gpu/driver-uapi.rst
index e5070a0e95ab..971cdb4816fc 100644
--- a/Documentation/gpu/driver-uapi.rst
+++ b/Documentation/gpu/driver-uapi.rst
@@ -18,6 +18,11 @@ VM_BIND / EXEC uAPI
.. kernel-doc:: include/uapi/drm/nouveau_drm.h
+drm/panthor uAPI
+================
+
+.. kernel-doc:: include/uapi/drm/panthor_drm.h
+
drm/xe uAPI
===========
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 13d3627d8bc0..abfe220764e1 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -398,6 +398,21 @@ Plane Damage Tracking Functions Reference
.. kernel-doc:: include/drm/drm_damage_helper.h
:internal:
+Plane Panic Feature
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_panic.c
+ :doc: overview
+
+Plane Panic Functions Reference
+-------------------------------
+
+.. kernel-doc:: include/drm/drm_panic.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panic.c
+ :export:
+
Display Modes Function Reference
================================
@@ -496,6 +511,13 @@ addition to the one mentioned above:
* An IGT test must be submitted where reasonable.
+For historical reasons, non-standard, driver-specific properties exist. If a KMS
+driver wants to add support for one of those properties, the requirements for
+new properties apply where possible. Additionally, the documented behavior must
+match the de facto semantics of the existing property to ensure compatibility.
+Developers of the driver that first added the property should help with those
+tasks and must ACK the documented behavior if possible.
+
Property Types and Blob Property Support
----------------------------------------
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 0ca1550fd9dc..17261ba18313 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -204,6 +204,15 @@ DMC Firmware Support
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc.c
:internal:
+DMC wakelock support
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
+ :doc: DMC wakelock support
+
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
+ :internal:
+
Video BIOS Table (VBT)
----------------------
diff --git a/Documentation/gpu/panfrost.rst b/Documentation/gpu/panfrost.rst
index b80e41f4b2c5..51ba375fd80d 100644
--- a/Documentation/gpu/panfrost.rst
+++ b/Documentation/gpu/panfrost.rst
@@ -38,3 +38,12 @@ the currently possible format options:
Possible `drm-engine-` key names are: `fragment`, and `vertex-tiler`.
`drm-curfreq-` values convey the current operating frequency for that engine.
+
+Users must bear in mind that engine and cycle sampling are disabled by default
+because of power-saving concerns. `fdinfo` users and benchmark applications that
+query the fdinfo file must make sure to toggle the job profiling status of the
+driver by writing into the appropriate sysfs node::
+
+ echo <N> > /sys/bus/platform/drivers/panfrost/[a-f0-9]*.gpu/profiling
+
+Where `N` is either `0` or `1`, depending on the desired enablement status.
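
An end-to-end sketch, assuming one Panfrost device and a DRM client whose PID
and file descriptor are already known (paths and numbers are illustrative)::

    echo 1 > /sys/bus/platform/drivers/panfrost/*.gpu/profiling
    grep '^drm-' /proc/1234/fdinfo/4
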
diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
index 8a8fcd4fceac..bc26dc126104 100644
--- a/Documentation/gpu/rfc/i915_vm_bind.h
+++ b/Documentation/gpu/rfc/i915_vm_bind.h
@@ -93,12 +93,11 @@ struct drm_i915_gem_timeline_fence {
* Multiple VA mappings can be created to the same section of the object
* (aliasing).
*
- * The @start, @offset and @length must be 4K page aligned. However the DG2
- * and XEHPSDV has 64K page size for device local memory and has compact page
- * table. On those platforms, for binding device local-memory objects, the
- * @start, @offset and @length must be 64K aligned. Also, UMDs should not mix
- * the local memory 64K page and the system memory 4K page bindings in the same
- * 2M range.
+ * The @start, @offset and @length must be 4K page aligned. However, the DG2 has
+ * 64K page size for device local memory and has compact page table. On that
+ * platform, for binding device local-memory objects, the @start, @offset and
+ * @length must be 64K aligned. Also, UMDs should not mix the local memory 64K
+ * page and the system memory 4K page bindings in the same 2M range.
*
* Error code -EINVAL will be returned if @start, @offset and @length are not
* properly aligned. In version 1 (See I915_PARAM_VM_BIND_VERSION), error code
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
index b1d97fafddcf..bb5c44f8bd1c 100644
--- a/Documentation/kbuild/llvm.rst
+++ b/Documentation/kbuild/llvm.rst
@@ -178,7 +178,7 @@ yet. Bug reports are always welcome at the issue tracker below!
- ``LLVM=1``
* - s390
- Maintained
- - ``CC=clang``
+ - ``LLVM=1`` (LLVM >= 18.1.0), ``CC=clang`` (LLVM < 18.1.0)
* - um (User Mode)
- Maintained
- ``LLVM=1``
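
In build terms, the updated s390 entry corresponds to invocations along these
lines (the cross-compile prefix is illustrative)::

    # LLVM >= 18.1.0: full LLVM toolchain
    make ARCH=s390 LLVM=1 defconfig all
    # older LLVM: clang as CC, GNU binutils for the rest
    make ARCH=s390 CC=clang CROSS_COMPILE=s390x-linux-gnu- defconfig all
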
diff --git a/Documentation/mm/page_owner.rst b/Documentation/mm/page_owner.rst
index 0d0334cd5179..3a45a20fc05a 100644
--- a/Documentation/mm/page_owner.rst
+++ b/Documentation/mm/page_owner.rst
@@ -24,10 +24,10 @@ fragmentation statistics can be obtained through gfp flag information of
each page. It is already implemented and activated if page owner is
enabled. Other usages are more than welcome.
-It can also be used to show all the stacks and their outstanding
-allocations, which gives us a quick overview of where the memory is going
-without the need to screen through all the pages and match the allocation
-and free operation.
+It can also be used to show all the stacks and their current number of
+allocated base pages, which gives us a quick overview of where the memory
+is going without the need to screen through all the pages and match the
+allocation and free operation.
page owner is disabled by default. So, if you'd like to use it, you need
to add "page_owner=on" to your boot cmdline. If the kernel is built
@@ -75,42 +75,45 @@ Usage
cat /sys/kernel/debug/page_owner_stacks/show_stacks > stacks.txt
cat stacks.txt
- prep_new_page+0xa9/0x120
- get_page_from_freelist+0x7e6/0x2140
- __alloc_pages+0x18a/0x370
- new_slab+0xc8/0x580
- ___slab_alloc+0x1f2/0xaf0
- __slab_alloc.isra.86+0x22/0x40
- kmem_cache_alloc+0x31b/0x350
- __khugepaged_enter+0x39/0x100
- dup_mmap+0x1c7/0x5ce
- copy_process+0x1afe/0x1c90
- kernel_clone+0x9a/0x3c0
- __do_sys_clone+0x66/0x90
- do_syscall_64+0x7f/0x160
- entry_SYSCALL_64_after_hwframe+0x6c/0x74
- stack_count: 234
+ post_alloc_hook+0x177/0x1a0
+ get_page_from_freelist+0xd01/0xd80
+ __alloc_pages+0x39e/0x7e0
+ allocate_slab+0xbc/0x3f0
+ ___slab_alloc+0x528/0x8a0
+ kmem_cache_alloc+0x224/0x3b0
+ sk_prot_alloc+0x58/0x1a0
+ sk_alloc+0x32/0x4f0
+ inet_create+0x427/0xb50
+ __sock_create+0x2e4/0x650
+ inet_ctl_sock_create+0x30/0x180
+ igmp_net_init+0xc1/0x130
+ ops_init+0x167/0x410
+ setup_net+0x304/0xa60
+ copy_net_ns+0x29b/0x4a0
+ create_new_namespaces+0x4a1/0x820
+ nr_base_pages: 16
...
...
echo 7000 > /sys/kernel/debug/page_owner_stacks/count_threshold
cat /sys/kernel/debug/page_owner_stacks/show_stacks> stacks_7000.txt
cat stacks_7000.txt
- prep_new_page+0xa9/0x120
- get_page_from_freelist+0x7e6/0x2140
- __alloc_pages+0x18a/0x370
- alloc_pages_mpol+0xdf/0x1e0
- folio_alloc+0x14/0x50
- filemap_alloc_folio+0xb0/0x100
- page_cache_ra_unbounded+0x97/0x180
- filemap_fault+0x4b4/0x1200
- __do_fault+0x2d/0x110
- do_pte_missing+0x4b0/0xa30
- __handle_mm_fault+0x7fa/0xb70
- handle_mm_fault+0x125/0x300
- do_user_addr_fault+0x3c9/0x840
- exc_page_fault+0x68/0x150
- asm_exc_page_fault+0x22/0x30
- stack_count: 8248
+ post_alloc_hook+0x177/0x1a0
+ get_page_from_freelist+0xd01/0xd80
+ __alloc_pages+0x39e/0x7e0
+ alloc_pages_mpol+0x22e/0x490
+ folio_alloc+0xd5/0x110
+ filemap_alloc_folio+0x78/0x230
+ page_cache_ra_order+0x287/0x6f0
+ filemap_get_pages+0x517/0x1160
+ filemap_read+0x304/0x9f0
+ xfs_file_buffered_read+0xe6/0x1d0 [xfs]
+ xfs_file_read_iter+0x1f0/0x380 [xfs]
+ __kernel_read+0x3b9/0x730
+ kernel_read_file+0x309/0x4d0
+ __do_sys_finit_module+0x381/0x730
+ do_syscall_64+0x8d/0x150
+ entry_SYSCALL_64_after_hwframe+0x62/0x6a
+ nr_base_pages: 20824
...
cat /sys/kernel/debug/page_owner > page_owner_full.txt
diff --git a/Documentation/networking/devlink/devlink-eswitch-attr.rst b/Documentation/networking/devlink/devlink-eswitch-attr.rst
new file mode 100644
index 000000000000..08bb39ab1528
--- /dev/null
+++ b/Documentation/networking/devlink/devlink-eswitch-attr.rst
@@ -0,0 +1,76 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+Devlink E-Switch Attribute
+==========================
+
+Devlink E-Switch supports two modes of operation: legacy and switchdev.
+Legacy mode operates based on traditional MAC/VLAN steering rules. Switching
+decisions are made based on MAC addresses, VLANs, etc. There is limited ability
+to offload switching rules to hardware.
+
+On the other hand, switchdev mode allows for more advanced offloading
+capabilities of the E-Switch to hardware. In switchdev mode, more switching
+rules and logic can be offloaded to the hardware switch ASIC. It enables
+representor netdevices that represent the slow path of virtual functions (VFs)
+or scalable-functions (SFs) of the device. See more information about
+:ref:`Documentation/networking/switchdev.rst <switchdev>` and
+:ref:`Documentation/networking/representors.rst <representors>`.
+
+In addition, the devlink E-Switch also comes with other attributes listed
+in the following section.
+
+Attributes Description
+======================
+
+The following is a list of E-Switch attributes.
+
+.. list-table:: E-Switch attributes
+ :widths: 8 5 45
+
+ * - Name
+ - Type
+ - Description
+ * - ``mode``
+ - enum
+ - The mode of the device. The mode can be one of the following:
+
+ * ``legacy`` operates based on traditional MAC/VLAN steering
+ rules.
+ * ``switchdev`` allows for more advanced offloading capabilities of
+ the E-Switch to hardware.
+ * - ``inline-mode``
+ - enum
+ - Some hardware needs the VF driver to put part of the packet
+ headers on the TX descriptor so the e-switch can do proper
+ matching and steering. Supported in both switchdev mode and legacy mode.
+
+ * ``none`` none.
+ * ``link`` L2 mode.
+ * ``network`` L3 mode.
+ * ``transport`` L4 mode.
+ * - ``encap-mode``
+ - enum
+ - The encapsulation mode of the device, supported in both switchdev
+ mode and legacy mode. The mode can be one of the following:
+
+ * ``none`` Disable encapsulation support.
+ * ``basic`` Enable encapsulation support.
+
+Example Usage
+=============
+
+.. code:: shell
+
+ # enable switchdev mode
+ $ devlink dev eswitch set pci/0000:08:00.0 mode switchdev
+
+ # set inline-mode and encap-mode
+ $ devlink dev eswitch set pci/0000:08:00.0 inline-mode none encap-mode basic
+
+ # display devlink device eswitch attributes
+ $ devlink dev eswitch show pci/0000:08:00.0
+ pci/0000:08:00.0: mode switchdev inline-mode none encap-mode basic
+
+ # enable encap-mode with legacy mode
+ $ devlink dev eswitch set pci/0000:08:00.0 mode legacy inline-mode none encap-mode basic
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index e14d7a701b72..948c8c44e233 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -67,6 +67,7 @@ general.
devlink-selftests
devlink-trap
devlink-linecard
+ devlink-eswitch-attr
Driver-specific documentation
-----------------------------
diff --git a/Documentation/networking/representors.rst b/Documentation/networking/representors.rst
index decb39c19b9e..5e23386f6968 100644
--- a/Documentation/networking/representors.rst
+++ b/Documentation/networking/representors.rst
@@ -1,4 +1,5 @@
.. SPDX-License-Identifier: GPL-2.0
+.. _representors:
=============================
Network Function Representors
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index bb2100228cc7..6e9a4597bf2c 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -252,7 +252,7 @@ an involved disclosed party. The current ambassadors list:
AMD Tom Lendacky <thomas.lendacky@amd.com>
Ampere Darren Hart <darren@os.amperecomputing.com>
ARM Catalin Marinas <catalin.marinas@arm.com>
- IBM Power Anton Blanchard <anton@linux.ibm.com>
+ IBM Power Michael Ellerman <ellerman@au.ibm.com>
IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <quic_tsoni@quicinc.com>
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
index 5c4fa9f5d1cd..c9137710633a 100644
--- a/Documentation/rust/arch-support.rst
+++ b/Documentation/rust/arch-support.rst
@@ -16,7 +16,7 @@ support corresponds to ``S`` values in the ``MAINTAINERS`` file.
Architecture Level of support Constraints
============= ================ ==============================================
``arm64`` Maintained Little Endian only.
-``loongarch`` Maintained -
+``loongarch`` Maintained \-
``um`` Maintained ``x86_64`` only.
``x86`` Maintained ``x86_64`` only.
============= ================ ==============================================
diff --git a/Documentation/timers/no_hz.rst b/Documentation/timers/no_hz.rst
index f8786be15183..7fe8ef9718d8 100644
--- a/Documentation/timers/no_hz.rst
+++ b/Documentation/timers/no_hz.rst
@@ -129,11 +129,8 @@ adaptive-tick CPUs: At least one non-adaptive-tick CPU must remain
online to handle timekeeping tasks in order to ensure that system
calls like gettimeofday() returns accurate values on adaptive-tick CPUs.
(This is not an issue for CONFIG_NO_HZ_IDLE=y because there are no running
-user processes to observe slight drifts in clock rate.) Therefore, the
-boot CPU is prohibited from entering adaptive-ticks mode. Specifying a
-"nohz_full=" mask that includes the boot CPU will result in a boot-time
-error message, and the boot CPU will be removed from the mask. Note that
-this means that your system must have at least two CPUs in order for
+user processes to observe slight drifts in clock rate.) Note that this
+means that your system must have at least two CPUs in order for
CONFIG_NO_HZ_FULL=y to do anything for you.
Finally, adaptive-ticks CPUs must have their RCU callbacks offloaded.
diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
index 995780088eb2..84335d119ff1 100644
--- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
@@ -46,21 +46,16 @@ SEV hardware uses ASIDs to associate a memory encryption key with a VM.
Hence, the ASID for the SEV-enabled guests must be from 1 to a maximum value
defined in the CPUID 0x8000001f[ecx] field.
-SEV Key Management
-==================
+The KVM_MEMORY_ENCRYPT_OP ioctl
+===============================
-The SEV guest key management is handled by a separate processor called the AMD
-Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
-key management interface to perform common hypervisor activities such as
-encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
-information, see the SEV Key Management spec [api-spec]_
-
-The main ioctl to access SEV is KVM_MEMORY_ENCRYPT_OP. If the argument
-to KVM_MEMORY_ENCRYPT_OP is NULL, the ioctl returns 0 if SEV is enabled
-and ``ENOTTY`` if it is disabled (on some older versions of Linux,
-the ioctl runs normally even with a NULL argument, and therefore will
-likely return ``EFAULT``). If non-NULL, the argument to KVM_MEMORY_ENCRYPT_OP
-must be a struct kvm_sev_cmd::
+The main ioctl to access SEV is KVM_MEMORY_ENCRYPT_OP, which operates on
+the VM file descriptor. If the argument to KVM_MEMORY_ENCRYPT_OP is NULL,
+the ioctl returns 0 if SEV is enabled and ``ENOTTY`` if it is disabled
+(on some older versions of Linux, the ioctl tries to run normally even
+with a NULL argument, and therefore will likely return ``EFAULT`` instead
+of zero if SEV is enabled). If non-NULL, the argument to
+KVM_MEMORY_ENCRYPT_OP must be a struct kvm_sev_cmd::
struct kvm_sev_cmd {
__u32 id;
@@ -87,10 +82,6 @@ guests, such as launching, running, snapshotting, migrating and decommissioning.
The KVM_SEV_INIT command is used by the hypervisor to initialize the SEV platform
context. In a typical workflow, this command should be the first command issued.
-The firmware can be initialized either by using its own non-volatile storage or
-the OS can manage the NV storage for the firmware using the module parameter
-``init_ex_path``. If the file specified by ``init_ex_path`` does not exist or
-is invalid, the OS will create or override the file with output from PSP.
Returns: 0 on success, -negative on error
@@ -434,6 +425,21 @@ issued by the hypervisor to make the guest ready for execution.
Returns: 0 on success, -negative on error
+Firmware Management
+===================
+
+The SEV guest key management is handled by a separate processor called the AMD
+Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
+key management interface to perform common hypervisor activities such as
+encrypting bootstrap code, snapshotting, migrating and debugging the guest. For
+more information, see the SEV Key Management spec [api-spec]_.
+
+The AMD-SP firmware can be initialized either from its own non-volatile
+storage, or the OS can manage the NV storage for the firmware using the
+``init_ex_path`` parameter of the ``ccp`` module. If the file specified
+by ``init_ex_path`` does not exist or is invalid, the OS will create or
+override the file with the PSP non-volatile storage contents.
+
References
==========
diff --git a/Documentation/virt/kvm/x86/msr.rst b/Documentation/virt/kvm/x86/msr.rst
index 9315fc385fb0..3aecf2a70e7b 100644
--- a/Documentation/virt/kvm/x86/msr.rst
+++ b/Documentation/virt/kvm/x86/msr.rst
@@ -193,8 +193,8 @@ data:
Asynchronous page fault (APF) control MSR.
Bits 63-6 hold 64-byte aligned physical address of a 64 byte memory area
- which must be in guest RAM and must be zeroed. This memory is expected
- to hold a copy of the following structure::
+ which must be in guest RAM. This memory is expected to hold the
+ following structure::
struct kvm_vcpu_pv_apf_data {
/* Used for 'page not present' events delivered via #PF */
@@ -204,7 +204,6 @@ data:
__u32 token;
__u8 pad[56];
- __u32 enabled;
};
Bits 5-4 of the MSR are reserved and should be zero. Bit 0 is set to 1
@@ -232,14 +231,14 @@ data:
as regular page fault, guest must reset 'flags' to '0' before it does
something that can generate normal page fault.
- Bytes 5-7 of 64 byte memory location ('token') will be written to by the
+ Bytes 4-7 of the 64 byte memory location ('token') will be written to by the
hypervisor at the time of APF 'page ready' event injection. The content
- of these bytes is a token which was previously delivered as 'page not
- present' event. The event indicates the page in now available. Guest is
- supposed to write '0' to 'token' when it is done handling 'page ready'
- event and to write 1' to MSR_KVM_ASYNC_PF_ACK after clearing the location;
- writing to the MSR forces KVM to re-scan its queue and deliver the next
- pending notification.
+ of these bytes is a token which was previously delivered in CR2 as
+ 'page not present' event. The event indicates the page is now available.
+ Guest is supposed to write '0' to 'token' when it is done handling
+ 'page ready' event and to write '1' to MSR_KVM_ASYNC_PF_ACK after
+ clearing the location; writing to the MSR forces KVM to re-scan its
+ queue and deliver the next pending notification.
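+
+ A guest-side sketch of that acknowledgement sequence, assuming a wrmsr()
+ helper and the 64 byte area mapped at 'apf' (both hypothetical here)::
+
+  token = READ_ONCE(apf->token); /* 'page ready' token from the hypervisor */
+  handle_page_ready(token); /* hypothetical consumer of the event */
+  WRITE_ONCE(apf->token, 0); /* guest is done with this event */
+  wrmsr(MSR_KVM_ASYNC_PF_ACK, 1); /* force KVM to re-scan its queue */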
Note, MSR_KVM_ASYNC_PF_INT MSR specifying the interrupt vector for 'page
ready' APF delivery needs to be written to before enabling APF mechanism
diff --git a/MAINTAINERS b/MAINTAINERS
index 161ded7f0ec0..d7cfe9895a44 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1671,7 +1671,7 @@ F: drivers/soc/versatile/
ARM KOMEDA DRM-KMS DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/arm,komeda.yaml
F: Documentation/gpu/komeda-kms.rst
F: drivers/gpu/drm/arm/display/include/
@@ -1683,15 +1683,26 @@ M: Rob Herring <robh@kernel.org>
R: Steven Price <steven.price@arm.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/panfrost.rst
F: drivers/gpu/drm/panfrost/
F: include/uapi/drm/panfrost_drm.h
+ARM MALI PANTHOR DRM DRIVER
+M: Boris Brezillon <boris.brezillon@collabora.com>
+M: Steven Price <steven.price@arm.com>
+M: Liviu Dudau <liviu.dudau@arm.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
+F: drivers/gpu/drm/panthor/
+F: include/uapi/drm/panthor_drm.h
+
ARM MALI-DP DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/arm,malidp.yaml
F: Documentation/gpu/afbc.rst
F: drivers/gpu/drm/arm/
@@ -2191,7 +2202,6 @@ N: mxs
ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
-M: Li Yang <leoyang.li@nxp.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
@@ -2708,7 +2718,7 @@ F: sound/soc/rockchip/
N: rockchip
ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
@@ -3573,6 +3583,7 @@ S: Supported
C: irc://irc.oftc.net/bcache
T: git https://evilpiepirate.org/git/bcachefs.git
F: fs/bcachefs/
+F: Documentation/filesystems/bcachefs/
BDISP ST MEDIA DRIVER
M: Fabien Dessenne <fabien.dessenne@foss.st.com>
@@ -3942,8 +3953,7 @@ F: kernel/bpf/ringbuf.c
BPF [SECURITY & LSM] (Security Audit and Enforcement using BPF)
M: KP Singh <kpsingh@kernel.org>
-R: Florent Revest <revest@chromium.org>
-R: Brendan Jackman <jackmanb@chromium.org>
+R: Matt Bobrowski <mattbobrowski@google.com>
L: bpf@vger.kernel.org
S: Maintained
F: Documentation/bpf/prog_lsm.rst
@@ -3968,7 +3978,7 @@ F: kernel/bpf/bpf_lru*
F: kernel/bpf/cgroup.c
BPF [TOOLING] (bpftool)
-M: Quentin Monnet <quentin@isovalent.com>
+M: Quentin Monnet <qmo@kernel.org>
L: bpf@vger.kernel.org
S: Maintained
F: kernel/bpf/disasm.*
@@ -4870,7 +4880,6 @@ F: drivers/power/supply/cw2015_battery.c
CEPH COMMON CODE (LIBCEPH)
M: Ilya Dryomov <idryomov@gmail.com>
M: Xiubo Li <xiubli@redhat.com>
-R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
@@ -4882,7 +4891,6 @@ F: net/ceph/
CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
M: Xiubo Li <xiubli@redhat.com>
M: Ilya Dryomov <idryomov@gmail.com>
-R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
@@ -5558,7 +5566,7 @@ F: drivers/cpuidle/cpuidle-big_little.c
CPUIDLE DRIVER - ARM EXYNOS
M: Daniel Lezcano <daniel.lezcano@linaro.org>
M: Kukjin Kim <kgene@kernel.org>
-R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+R: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -6157,7 +6165,6 @@ DEVICE-MAPPER (LVM)
M: Alasdair Kergon <agk@redhat.com>
M: Mike Snitzer <snitzer@kernel.org>
M: Mikulas Patocka <mpatocka@redhat.com>
-M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
Q: http://patchwork.kernel.org/project/dm-devel/list/
@@ -6173,7 +6180,6 @@ F: include/uapi/linux/dm-*.h
DEVICE-MAPPER VDO TARGET
M: Matthew Sakai <msakai@redhat.com>
-M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
F: Documentation/admin-guide/device-mapper/vdo*.rst
@@ -6315,7 +6321,7 @@ L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/driver-api/dma-buf.rst
F: Documentation/userspace-api/dma-buf-alloc-exchange.rst
F: drivers/dma-buf/
@@ -6369,7 +6375,7 @@ L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/dma-buf/dma-heap.c
F: drivers/dma-buf/heaps/*
F: include/linux/dma-heap.h
@@ -6578,7 +6584,7 @@ M: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
M: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/accel/ivpu/
F: include/uapi/drm/ivpu_accel.h
@@ -6598,18 +6604,18 @@ M: Chen-Yu Tsai <wens@csie.org>
R: Jernej Skrabec <jernej.skrabec@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/sun4i/sun8i*
DRM DRIVER FOR ARM PL111 CLCD
S: Orphan
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/pl111/
DRM DRIVER FOR ARM VERSATILE TFT PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
F: drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -6617,7 +6623,7 @@ DRM DRIVER FOR ASPEED BMC GFX
M: Joel Stanley <joel@jms.id.au>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
F: drivers/gpu/drm/aspeed/
@@ -6627,14 +6633,14 @@ R: Thomas Zimmermann <tzimmermann@suse.de>
R: Jocelyn Falempe <jfalempe@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ast/
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux.dev
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/bochs.c
DRM DRIVER FOR BOE HIMAX8279D PANELS
@@ -6652,14 +6658,14 @@ F: drivers/gpu/drm/bridge/chipone-icn6211.c
DRM DRIVER FOR EBBG FT8719 PANEL
M: Joel Selvaraj <jo@jsfamily.in>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/ebbg,ft8719.yaml
F: drivers/gpu/drm/panel/panel-ebbg-ft8719.c
DRM DRIVER FOR FARADAY TVE200 TV ENCODER
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tve200/
DRM DRIVER FOR FEIXIN K101 IM2BA02 MIPI-DSI LCD PANELS
@@ -6679,7 +6685,7 @@ M: Thomas Zimmermann <tzimmermann@suse.de>
M: Javier Martinez Canillas <javierm@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/drm_aperture.c
F: drivers/gpu/drm/tiny/ofdrm.c
F: drivers/gpu/drm/tiny/simpledrm.c
@@ -6698,27 +6704,27 @@ DRM DRIVER FOR GENERIC USB DISPLAY
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
W: https://github.com/notro/gud/wiki
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/gud/
F: include/drm/gud.h
DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
M: Hans de Goede <hdegoede@redhat.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/gm12u320.c
DRM DRIVER FOR HIMAX HX8394 MIPI-DSI LCD panels
M: Ondrej Jirman <megi@xff.cz>
M: Javier Martinez Canillas <javierm@redhat.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
F: drivers/gpu/drm/panel/panel-himax-hx8394.c
DRM DRIVER FOR HX8357D PANELS
S: Orphan
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/himax,hx8357d.txt
F: drivers/gpu/drm/tiny/hx8357d.c
@@ -6727,20 +6733,20 @@ M: Deepak Rawat <drawat.floss@gmail.com>
L: linux-hyperv@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/hyperv
DRM DRIVER FOR ILITEK ILI9225 PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt
F: drivers/gpu/drm/tiny/ili9225.c
DRM DRIVER FOR ILITEK ILI9486 PANELS
M: Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
F: drivers/gpu/drm/tiny/ili9486.c
@@ -6756,17 +6762,25 @@ S: Maintained
F: Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
F: drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+DRM DRIVER FOR LG SW43408 PANELS
+M: Sumit Semwal <sumit.semwal@linaro.org>
+M: Caleb Connolly <caleb.connolly@linaro.org>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
+F: drivers/gpu/drm/panel/panel-lg-sw43408.c
+
DRM DRIVER FOR LOGICVC DISPLAY CONTROLLER
M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/logicvc/
DRM DRIVER FOR LVDS PANELS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/lvds.yaml
F: Documentation/devicetree/bindings/display/panel/panel-lvds.yaml
F: drivers/gpu/drm/panel/panel-lvds.c
@@ -6784,13 +6798,13 @@ R: Thomas Zimmermann <tzimmermann@suse.de>
R: Jocelyn Falempe <jfalempe@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/mgag200/
DRM DRIVER FOR MI0283QT
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
F: drivers/gpu/drm/tiny/mi0283qt.c
@@ -6798,7 +6812,7 @@ DRM DRIVER FOR MIPI DBI compatible panels
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
W: https://github.com/notro/panel-mipi-dbi/wiki
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
F: drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -6840,28 +6854,28 @@ F: include/uapi/drm/msm_drm.h
DRM DRIVER FOR NOVATEK NT35510 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
DRM DRIVER FOR NOVATEK NT35560 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35560.c
DRM DRIVER FOR NOVATEK NT36523 PANELS
M: Jianhua Lu <lujianhua000@gmail.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36523.c
DRM DRIVER FOR NOVATEK NT36672A PANELS
M: Sumit Semwal <sumit.semwal@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -6895,7 +6909,7 @@ F: drivers/gpu/drm/bridge/parade-ps8640.c
DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/repaper.txt
F: drivers/gpu/drm/tiny/repaper.c
@@ -6905,7 +6919,7 @@ M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux.dev
S: Obsolete
W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/cirrus.c
DRM DRIVER FOR QXL VIRTUAL GPU
@@ -6914,7 +6928,7 @@ M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux.dev
L: spice-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/qxl/
F: include/uapi/drm/qxl_drm.h
@@ -6927,7 +6941,7 @@ F: drivers/gpu/drm/panel/panel-raydium-rm67191.c
DRM DRIVER FOR SAMSUNG DB7430 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
F: drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -6936,7 +6950,7 @@ M: Inki Dae <inki.dae@samsung.com>
M: Jagan Teki <jagan@amarulasolutions.com>
M: Marek Szyprowski <m.szyprowski@samsung.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml
F: drivers/gpu/drm/bridge/samsung-dsim.c
F: include/drm/bridge/samsung-dsim.h
@@ -6956,7 +6970,7 @@ F: drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
DRM DRIVER FOR SITRONIX ST7586 PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/sitronix,st7586.txt
F: drivers/gpu/drm/tiny/st7586.c
@@ -6977,14 +6991,14 @@ F: drivers/gpu/drm/panel/panel-sitronix-st7703.c
DRM DRIVER FOR SITRONIX ST7735R PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
F: drivers/gpu/drm/tiny/st7735r.c
DRM DRIVER FOR SOLOMON SSD130X OLED DISPLAYS
M: Javier Martinez Canillas <javierm@redhat.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/solomon,ssd-common.yaml
F: Documentation/devicetree/bindings/display/solomon,ssd13*.yaml
F: drivers/gpu/drm/solomon/ssd130x*
@@ -6992,7 +7006,7 @@ F: drivers/gpu/drm/solomon/ssd130x*
DRM DRIVER FOR ST-ERICSSON MCDE
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ste,mcde.yaml
F: drivers/gpu/drm/mcde/
@@ -7016,7 +7030,7 @@ F: drivers/gpu/drm/bridge/ti-sn65dsi86.c
DRM DRIVER FOR TPO TPG110 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/tpo,tpg110.yaml
F: drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -7026,7 +7040,7 @@ R: Sean Paul <sean@poorly.run>
R: Thomas Zimmermann <tzimmermann@suse.de>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/udl/
DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
@@ -7037,7 +7051,7 @@ R: Haneen Mohammed <hamohammed.sa@gmail.com>
R: Daniel Vetter <daniel@ffwll.ch>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/vkms.rst
F: drivers/gpu/drm/vkms/
@@ -7045,7 +7059,7 @@ DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
M: Hans de Goede <hdegoede@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/vboxvideo/
DRM DRIVER FOR VMWARE VIRTUAL GPU
@@ -7053,14 +7067,14 @@ M: Zack Rusin <zack.rusin@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/vmwgfx/
F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVER FOR WIDECHIPS WS2401 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
F: drivers/gpu/drm/panel/panel-widechips-ws2401.c
@@ -7085,8 +7099,8 @@ M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
M: Maxime Ripard <mripard@kernel.org>
M: Thomas Zimmermann <tzimmermann@suse.de>
S: Maintained
-W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
-T: git git://anongit.freedesktop.org/drm/drm-misc
+W: https://drm.pages.freedesktop.org/maintainer-tools/drm-misc.html
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/
F: Documentation/devicetree/bindings/gpu/
F: Documentation/gpu/
@@ -7113,7 +7127,7 @@ M: Maxime Ripard <mripard@kernel.org>
M: Chen-Yu Tsai <wens@csie.org>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/allwinner*
F: drivers/gpu/drm/sun4i/
@@ -7123,7 +7137,7 @@ L: dri-devel@lists.freedesktop.org
L: linux-amlogic@lists.infradead.org
S: Supported
W: http://linux-meson.com/
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
F: Documentation/gpu/meson.rst
@@ -7135,7 +7149,7 @@ M: Sam Ravnborg <sam@ravnborg.org>
M: Boris Brezillon <bbrezillon@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/atmel/
F: drivers/gpu/drm/atmel-hlcdc/
@@ -7147,7 +7161,7 @@ R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
R: Jonas Karlman <jonas@kwiboo.se>
R: Jernej Skrabec <jernej.skrabec@gmail.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/
F: drivers/gpu/drm/bridge/
F: drivers/gpu/drm/drm_bridge.c
@@ -7172,7 +7186,7 @@ M: Stefan Agner <stefan@agner.ch>
M: Alison Wang <alison.wang@nxp.com>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/fsl,dcu.txt
F: Documentation/devicetree/bindings/display/fsl,tcon.txt
F: drivers/gpu/drm/fsl-dcu/
@@ -7181,7 +7195,7 @@ DRM DRIVERS FOR FREESCALE IMX 5/6
M: Philipp Zabel <p.zabel@pengutronix.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
T: git git://git.pengutronix.de/git/pza/linux
F: Documentation/devicetree/bindings/display/imx/
F: drivers/gpu/drm/imx/ipuv3/
@@ -7201,7 +7215,7 @@ DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
M: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
@@ -7213,7 +7227,7 @@ R: Yongqin Liu <yongqin.liu@linaro.org>
R: John Stultz <jstultz@google.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/hisilicon/
F: drivers/gpu/drm/hisilicon/
@@ -7222,7 +7236,7 @@ M: Qiang Yu <yuq825@gmail.com>
L: dri-devel@lists.freedesktop.org
L: lima@lists.freedesktop.org (moderated for non-subscribers)
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/lima/
F: include/uapi/drm/lima_drm.h
@@ -7230,7 +7244,7 @@ DRM DRIVERS FOR LOONGSON
M: Sui Jingfeng <suijingfeng@loongson.cn>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/loongson/
DRM DRIVERS FOR MEDIATEK
@@ -7278,7 +7292,7 @@ M: Biju Das <biju.das.jz@bp.renesas.com>
L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
F: drivers/gpu/drm/renesas/rz-du/
@@ -7288,7 +7302,7 @@ M: Geert Uytterhoeven <geert+renesas@glider.be>
L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
F: drivers/gpu/drm/renesas/shmobile/
F: include/linux/platform_data/shmob_drm.h
@@ -7299,7 +7313,7 @@ M: Heiko Stübner <heiko@sntech.de>
M: Andy Yan <andy.yan@rock-chips.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/rockchip/
F: drivers/gpu/drm/ci/xfails/rockchip*
F: drivers/gpu/drm/rockchip/
@@ -7308,7 +7322,7 @@ DRM DRIVERS FOR STI
M: Alain Volmat <alain.volmat@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/st,stih4xx.txt
F: drivers/gpu/drm/sti
@@ -7318,7 +7332,7 @@ M: Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
M: Philippe Cornu <philippe.cornu@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
F: drivers/gpu/drm/stm
@@ -7327,7 +7341,7 @@ M: Jyri Sarha <jyri.sarha@iki.fi>
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
F: Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
F: Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@@ -7338,7 +7352,7 @@ M: Jyri Sarha <jyri.sarha@iki.fi>
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/tilcdc/
F: drivers/gpu/drm/tilcdc/
@@ -7346,7 +7360,7 @@ DRM DRIVERS FOR TI OMAP
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ti/
F: drivers/gpu/drm/omapdrm/
@@ -7354,7 +7368,7 @@ DRM DRIVERS FOR V3D
M: Melissa Wen <mwen@igalia.com>
M: Maíra Canal <mcanal@igalia.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
F: drivers/gpu/drm/v3d/
F: include/uapi/drm/v3d_drm.h
@@ -7363,7 +7377,7 @@ DRM DRIVERS FOR VC4
M: Maxime Ripard <mripard@kernel.org>
S: Supported
T: git git://github.com/anholt/linux
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/brcm,bcm2835-*.yaml
F: drivers/gpu/drm/vc4/
F: include/uapi/drm/vc4_drm.h
@@ -7384,15 +7398,16 @@ M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
L: dri-devel@lists.freedesktop.org
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/xen-front.rst
F: drivers/gpu/drm/xen/
DRM DRIVERS FOR XILINX
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/xlnx/
F: drivers/gpu/drm/xlnx/
@@ -7401,7 +7416,7 @@ M: Luben Tuikov <ltuikov89@gmail.com>
M: Matthew Brost <matthew.brost@intel.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/scheduler/
F: include/drm/gpu_scheduler.h
@@ -7411,7 +7426,7 @@ R: Jessica Zhang <quic_jesszhan@quicinc.com>
R: Sam Ravnborg <sam@ravnborg.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/
F: drivers/gpu/drm/drm_panel.c
F: drivers/gpu/drm/panel/
@@ -7421,7 +7436,7 @@ DRM PRIVACY-SCREEN CLASS
M: Hans de Goede <hdegoede@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/drm_privacy_screen*
F: include/drm/drm_privacy_screen*
@@ -7430,7 +7445,7 @@ M: Christian Koenig <christian.koenig@amd.com>
M: Huang Rui <ray.huang@amd.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
@@ -7438,7 +7453,7 @@ DRM AUTOMATED TESTING
M: Helen Koike <helen.koike@collabora.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/automated_testing.rst
F: drivers/gpu/drm/ci/
@@ -7852,9 +7867,8 @@ W: http://aeschi.ch.eu.org/efs/
F: fs/efs/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Douglas Miller <dougmill@linux.ibm.com>
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/net/ethernet/ibm/ehea/
ELM327 CAN NETWORK DRIVER
@@ -7959,6 +7973,7 @@ M: Gao Xiang <xiang@kernel.org>
M: Chao Yu <chao@kernel.org>
R: Yue Hu <huyue2@coolpad.com>
R: Jeffle Xu <jefflexu@linux.alibaba.com>
+R: Sandeep Dhavale <dhavale@google.com>
L: linux-erofs@lists.ozlabs.org
S: Maintained
W: https://erofs.docs.kernel.org
@@ -8503,7 +8518,7 @@ F: arch/x86/math-emu/
FRAMEBUFFER CORE
M: Daniel Vetter <daniel@ffwll.ch>
S: Odd Fixes
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/video/fbdev/core/
FRAMEBUFFER LAYER
@@ -8543,7 +8558,6 @@ S: Maintained
F: drivers/video/fbdev/fsl-diu-fb.*
FREESCALE DMA DRIVER
-M: Li Yang <leoyang.li@nxp.com>
M: Zhang Wei <zw@zh-kernel.org>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
@@ -8708,10 +8722,9 @@ F: drivers/soc/fsl/qe/tsa.h
F: include/dt-bindings/soc/cpm1-fsl,tsa.h
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
-M: Li Yang <leoyang.li@nxp.com>
L: netdev@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Orphan
F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE QUICC ENGINE UCC HDLC DRIVER
@@ -8728,10 +8741,9 @@ S: Maintained
F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC DRIVERS
-M: Li Yang <leoyang.li@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
F: Documentation/devicetree/bindings/soc/fsl/
F: drivers/soc/fsl/
@@ -8765,17 +8777,15 @@ F: Documentation/devicetree/bindings/sound/fsl,qmc-audio.yaml
F: sound/soc/fsl/fsl_qmc_audio.c
FREESCALE USB PERIPHERAL DRIVERS
-M: Li Yang <leoyang.li@nxp.com>
L: linux-usb@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Orphan
F: drivers/usb/gadget/udc/fsl*
FREESCALE USB PHY DRIVER
-M: Ran Wang <ran.wang_1@nxp.com>
L: linux-usb@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Orphan
F: drivers/usb/phy/phy-fsl-usb*
FREEVXFS FILESYSTEM
@@ -9020,7 +9030,7 @@ F: drivers/i2c/muxes/i2c-mux-gpio.c
F: include/linux/platform_data/i2c-mux-gpio.h
GENERIC GPIO RESET DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
S: Maintained
F: drivers/reset/reset-gpio.c
@@ -9603,7 +9613,7 @@ F: kernel/power/
HID CORE LAYER
M: Jiri Kosina <jikos@kernel.org>
-M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <bentiss@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
@@ -9671,7 +9681,9 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/hid-logitech-hidpp.c
-HIGH-RESOLUTION TIMERS, CLOCKEVENTS
+HIGH-RESOLUTION TIMERS, TIMER WHEEL, CLOCKEVENTS
+M: Anna-Maria Behnsen <anna-maria@linutronix.de>
+M: Frederic Weisbecker <frederic@kernel.org>
M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
S: Maintained
@@ -9679,9 +9691,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
F: Documentation/timers/
F: include/linux/clockchips.h
F: include/linux/hrtimer.h
+F: include/linux/timer.h
F: kernel/time/clockevents.c
F: kernel/time/hrtimer.c
-F: kernel/time/timer_*.c
+F: kernel/time/timer.c
+F: kernel/time/timer_list.c
+F: kernel/time/timer_migration.*
+F: tools/testing/selftests/timers/
HIGH-SPEED SCC DRIVER FOR AX.25
L: linux-hams@vger.kernel.org
@@ -10044,7 +10060,7 @@ F: drivers/media/platform/st/sti/hva
HWPOISON MEMORY FAILURE HANDLING
M: Miaohe Lin <linmiaohe@huawei.com>
-R: Naoya Horiguchi <naoya.horiguchi@nec.com>
+R: Naoya Horiguchi <nao.horiguchi@gmail.com>
L: linux-mm@kvack.org
S: Maintained
F: mm/hwpoison-inject.c
@@ -10604,7 +10620,7 @@ IMGTEC POWERVR DRM DRIVER
M: Frank Binns <frank.binns@imgtec.com>
M: Matt Coster <matt.coster@imgtec.com>
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
F: Documentation/devicetree/bindings/gpu/img,powervr-sgx.yaml
F: Documentation/gpu/imagination/
@@ -11385,7 +11401,7 @@ IOSYS-MAP HELPERS
M: Thomas Zimmermann <tzimmermann@suse.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: include/linux/iosys-map.h
IO_URING
@@ -11578,7 +11594,7 @@ ITE IT66121 HDMI BRIDGE DRIVER
M: Phong LE <ple@baylibre.com>
M: Neil Armstrong <neil.armstrong@linaro.org>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
F: drivers/gpu/drm/bridge/ite-it66121.c
@@ -12015,7 +12031,7 @@ F: include/keys/encrypted-type.h
F: security/keys/encrypted-keys/
KEYS-TRUSTED
-M: James Bottomley <jejb@linux.ibm.com>
+M: James Bottomley <James.Bottomley@HansenPartnership.com>
M: Jarkko Sakkinen <jarkko@kernel.org>
M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org
@@ -13152,6 +13168,7 @@ F: drivers/net/ethernet/marvell/mvpp2/
MARVELL MWIFIEX WIRELESS DRIVER
M: Brian Norris <briannorris@chromium.org>
+R: Francesco Dolcini <francesco@dolcini.it>
L: linux-wireless@vger.kernel.org
S: Odd Fixes
F: drivers/net/wireless/marvell/mwifiex/
@@ -13308,7 +13325,7 @@ F: drivers/iio/adc/max11205.c
MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
R: Iskren Chernev <iskren.chernev@gmail.com>
-R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+R: Krzysztof Kozlowski <krzk@kernel.org>
R: Marek Szyprowski <m.szyprowski@samsung.com>
R: Matheus Castello <matheus@castello.eng.br>
L: linux-pm@vger.kernel.org
@@ -13318,7 +13335,7 @@ F: drivers/power/supply/max17040_battery.c
MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
R: Hans de Goede <hdegoede@redhat.com>
-R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+R: Krzysztof Kozlowski <krzk@kernel.org>
R: Marek Szyprowski <m.szyprowski@samsung.com>
R: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
R: Purism Kernel Team <kernel@puri.sm>
@@ -13376,7 +13393,7 @@ F: Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
F: drivers/power/supply/max77976_charger.c
MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
B: mailto:linux-samsung-soc@vger.kernel.org
@@ -13387,7 +13404,7 @@ F: drivers/power/supply/max77693_charger.c
MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
M: Chanwoo Choi <cw00.choi@samsung.com>
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
B: mailto:linux-samsung-soc@vger.kernel.org
@@ -14032,6 +14049,7 @@ F: drivers/net/ethernet/mellanox/mlx4/en_*
MELLANOX ETHERNET DRIVER (mlx5e)
M: Saeed Mahameed <saeedm@nvidia.com>
+M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@@ -14099,6 +14117,7 @@ F: include/uapi/rdma/mlx4-abi.h
MELLANOX MLX5 core VPI driver
M: Saeed Mahameed <saeedm@nvidia.com>
M: Leon Romanovsky <leonro@nvidia.com>
+M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
S: Supported
@@ -14169,7 +14188,7 @@ F: mm/mm_init.c
F: tools/testing/memblock/
MEMORY CONTROLLER DRIVERS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
B: mailto:krzysztof.kozlowski@linaro.org
@@ -14374,7 +14393,7 @@ F: drivers/dma/at_xdmac.c
F: include/dt-bindings/dma/at91.h
MICROCHIP AT91 SERIAL DRIVER
-M: Richard Genoud <richard.genoud@gmail.com>
+M: Richard Genoud <richard.genoud@bootlin.com>
S: Maintained
F: Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
F: drivers/tty/serial/atmel_serial.c
@@ -14580,6 +14599,14 @@ S: Supported
F: Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
F: drivers/pwm/pwm-atmel.c
+MICROCHIP SAM9x7-COMPATIBLE LVDS CONTROLLER
+M: Manikandan Muralidharan <manikandan.m@microchip.com>
+M: Dharma Balasubiramani <dharma.b@microchip.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+F: Documentation/devicetree/bindings/display/bridge/microchip,sam9x75-lvds.yaml
+F: drivers/gpu/drm/bridge/microchip-lvds.c
+
MICROCHIP SAMA5D2-COMPATIBLE ADC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
L: linux-iio@vger.kernel.org
@@ -15167,7 +15194,7 @@ M: Marek Vasut <marex@denx.de>
M: Stefan Agner <stefan@agner.ch>
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/fsl,lcdif.yaml
F: drivers/gpu/drm/mxsfb/
@@ -15550,7 +15577,7 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c
NFC SUBSYSTEM
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/
@@ -15645,9 +15672,10 @@ F: drivers/misc/nsm.c
F: include/uapi/linux/nsm.h
NOHZ, DYNTICKS SUPPORT
+M: Anna-Maria Behnsen <anna-maria@linutronix.de>
M: Frederic Weisbecker <frederic@kernel.org>
-M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@kernel.org>
+M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/nohz
@@ -15887,7 +15915,7 @@ M: Laurentiu Palcu <laurentiu.palcu@oss.nxp.com>
R: Lucas Stach <l.stach@pengutronix.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
F: drivers/gpu/drm/imx/dcss/
@@ -15926,7 +15954,7 @@ F: Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
F: drivers/regulator/pf8x00-regulator.c
NXP PTN5150A CC LOGIC AND EXTCON DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -16537,7 +16565,7 @@ K: of_overlay_remove
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh@kernel.org>
-M: Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>
+M: Krzysztof Kozlowski <krzk+dt@kernel.org>
M: Conor Dooley <conor+dt@kernel.org>
L: devicetree@vger.kernel.org
S: Maintained
@@ -16743,9 +16771,9 @@ F: include/uapi/linux/ppdev.h
PARAVIRT_OPS INTERFACE
M: Juergen Gross <jgross@suse.com>
-R: Ajay Kaher <akaher@vmware.com>
-R: Alexey Makhalov <amakhalov@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+R: Ajay Kaher <ajay.kaher@broadcom.com>
+R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
S: Supported
@@ -16816,12 +16844,6 @@ S: Maintained
F: drivers/leds/leds-pca9532.c
F: include/linux/leds-pca9532.h
-PCA9541 I2C BUS MASTER SELECTOR DRIVER
-M: Guenter Roeck <linux@roeck-us.net>
-L: linux-i2c@vger.kernel.org
-S: Maintained
-F: drivers/i2c/muxes/i2c-mux-pca9541.c
-
PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
M: Pali Rohár <pali@kernel.org>
@@ -16984,7 +17006,6 @@ F: drivers/pci/controller/dwc/pci-exynos.c
PCI DRIVER FOR SYNOPSYS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
-M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-pci@vger.kernel.org
S: Maintained
@@ -17495,7 +17516,7 @@ F: Documentation/devicetree/bindings/pinctrl/renesas,*
F: drivers/pinctrl/renesas/
PIN CONTROLLER - SAMSUNG
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -17608,15 +17629,20 @@ F: drivers/pnp/
F: include/linux/pnp.h
POSIX CLOCKS and TIMERS
+M: Anna-Maria Behnsen <anna-maria@linutronix.de>
+M: Frederic Weisbecker <frederic@kernel.org>
M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
F: fs/timerfd.c
F: include/linux/time_namespace.h
-F: include/linux/timer*
+F: include/linux/timerfd.h
+F: include/uapi/linux/time.h
+F: include/uapi/linux/timerfd.h
F: include/trace/events/timer*
-F: kernel/time/*timer*
+F: kernel/time/itimer.c
+F: kernel/time/posix-*
F: kernel/time/namespace.c
POWER MANAGEMENT CORE
@@ -17886,7 +17912,7 @@ F: Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml
F: drivers/media/rc/pwm-ir-tx.c
PWM SUBSYSTEM
-M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+M: Uwe Kleine-König <ukleinek@kernel.org>
L: linux-pwm@vger.kernel.org
S: Maintained
Q: https://patchwork.ozlabs.org/project/linux-pwm/list/
@@ -18186,7 +18212,7 @@ R: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Supported
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/accel/qaic/
F: drivers/accel/qaic/
F: include/uapi/drm/qaic_accel.h
@@ -18663,18 +18689,21 @@ REALTEK WIRELESS DRIVER (rtlwifi family)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtlwifi/
REALTEK WIRELESS DRIVER (rtw88)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtw88/
REALTEK WIRELESS DRIVER (rtw89)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtw89/
REDPINE WIRELESS DRIVER
@@ -18745,13 +18774,24 @@ S: Supported
F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
F: drivers/i2c/busses/i2c-emev2.c
-RENESAS ETHERNET DRIVERS
+RENESAS ETHERNET AVB DRIVER
R: Sergey Shtylyov <s.shtylyov@omp.ru>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
-F: Documentation/devicetree/bindings/net/renesas,*.yaml
-F: drivers/net/ethernet/renesas/
-F: include/linux/sh_eth.h
+F: Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+F: drivers/net/ethernet/renesas/Kconfig
+F: drivers/net/ethernet/renesas/Makefile
+F: drivers/net/ethernet/renesas/ravb*
+
+RENESAS ETHERNET SWITCH DRIVER
+R: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+L: netdev@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+F: Documentation/devicetree/bindings/net/renesas,*ether-switch.yaml
+F: drivers/net/ethernet/renesas/Kconfig
+F: drivers/net/ethernet/renesas/Makefile
+F: drivers/net/ethernet/renesas/rcar_gen4*
+F: drivers/net/ethernet/renesas/rswitch*
RENESAS IDT821034 ASoC CODEC
M: Herve Codina <herve.codina@bootlin.com>
@@ -18861,6 +18901,16 @@ S: Supported
F: Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
F: drivers/i2c/busses/i2c-rzv2m.c
+RENESAS SUPERH ETHERNET DRIVER
+R: Sergey Shtylyov <s.shtylyov@omp.ru>
+L: netdev@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+F: Documentation/devicetree/bindings/net/renesas,ether.yaml
+F: drivers/net/ethernet/renesas/Kconfig
+F: drivers/net/ethernet/renesas/Makefile
+F: drivers/net/ethernet/renesas/sh_eth*
+F: include/linux/sh_eth.h
+
RENESAS USB PHY DRIVER
M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
L: linux-renesas-soc@vger.kernel.org
@@ -19197,12 +19247,14 @@ M: Hin-Tak Leung <hintak.leung@gmail.com>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtl818x/rtl8187/
RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
+T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtl8xxxu/
RTRS TRANSPORT DRIVERS
@@ -19432,7 +19484,7 @@ F: Documentation/devicetree/bindings/sound/samsung*
F: sound/soc/samsung/
SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -19467,7 +19519,7 @@ S: Maintained
F: drivers/platform/x86/samsung-laptop.c
SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -19493,7 +19545,7 @@ F: drivers/media/platform/samsung/s3c-camif/
F: include/media/drv-intf/s3c_camif.h
SAMSUNG S3FWRN5 NFC DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
F: drivers/nfc/s3fwrn5
@@ -19514,7 +19566,7 @@ S: Supported
F: drivers/media/i2c/s5k5baf.c
SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
M: Vladimir Zapolskiy <vz@mleia.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
@@ -19536,7 +19588,7 @@ F: Documentation/devicetree/bindings/media/samsung,fimc.yaml
F: drivers/media/platform/samsung/exynos4-is/
SAMSUNG SOC CLOCK DRIVERS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
R: Alim Akhtar <alim.akhtar@samsung.com>
@@ -19568,7 +19620,7 @@ F: drivers/net/ethernet/samsung/sxgbe/
SAMSUNG THERMAL DRIVER
M: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -19655,7 +19707,7 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h
SCSI SUBSYSTEM
-M: "James E.J. Bottomley" <jejb@linux.ibm.com>
+M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
M: "Martin K. Petersen" <martin.petersen@oracle.com>
L: linux-scsi@vger.kernel.org
S: Maintained
@@ -20164,7 +20216,6 @@ F: include/linux/platform_data/simplefb.h
SIOX
M: Thorsten Scherer <t.scherer@eckelmann.de>
-M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
S: Supported
F: drivers/gpio/gpio-siox.c
@@ -21312,7 +21363,7 @@ R: Gustavo Padovan <gustavo@padovan.org>
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/driver-api/sync_file.rst
F: drivers/dma-buf/dma-fence*
F: drivers/dma-buf/sw_sync.c
@@ -22272,13 +22323,20 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
F: include/linux/clocksource.h
F: include/linux/time.h
+F: include/linux/timekeeper_internal.h
+F: include/linux/timekeeping.h
F: include/linux/timex.h
F: include/uapi/linux/time.h
F: include/uapi/linux/timex.h
F: kernel/time/alarmtimer.c
-F: kernel/time/clocksource.c
-F: kernel/time/ntp.c
-F: kernel/time/time*.c
+F: kernel/time/clocksource*
+F: kernel/time/ntp*
+F: kernel/time/time.c
+F: kernel/time/timeconst.bc
+F: kernel/time/timeconv.c
+F: kernel/time/timecounter.c
+F: kernel/time/timekeeping*
+F: kernel/time/time_test.c
F: tools/testing/selftests/timers/
TIPC NETWORK LAYER
@@ -22402,6 +22460,7 @@ S: Maintained
W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
+F: Documentation/devicetree/bindings/tpm/
F: drivers/char/tpm/
TPS546D24 DRIVER
@@ -22548,6 +22607,7 @@ Q: https://patchwork.kernel.org/project/linux-pm/list/
B: https://bugzilla.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git turbostat
F: tools/power/x86/turbostat/
+F: tools/testing/selftests/turbostat/
TW5864 VIDEO4LINUX DRIVER
M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
@@ -22817,7 +22877,7 @@ F: drivers/usb/host/ehci*
USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
M: Jiri Kosina <jikos@kernel.org>
-M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <bentiss@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
@@ -23087,7 +23147,7 @@ USERSPACE DMA BUFFER DRIVER
M: Gerd Hoffmann <kraxel@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/dma-buf/udmabuf.c
F: include/uapi/linux/udmabuf.h
@@ -23269,7 +23329,7 @@ F: drivers/vfio/pci/virtio
VGA_SWITCHEROO
R: Lukas Wunner <lukas@wunner.de>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/vga-switcheroo.rst
F: drivers/gpu/vga/vga_switcheroo.c
F: include/linux/vga_switcheroo.h
@@ -23462,7 +23522,7 @@ R: Chia-I Wu <olvaffe@gmail.com>
L: dri-devel@lists.freedesktop.org
L: virtualization@lists.linux.dev
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ci/xfails/virtio*
F: drivers/gpu/drm/virtio/
F: include/uapi/linux/virtio_gpu.h
@@ -23626,9 +23686,9 @@ S: Supported
F: drivers/misc/vmw_balloon.c
VMWARE HYPERVISOR INTERFACE
-M: Ajay Kaher <akaher@vmware.com>
-M: Alexey Makhalov <amakhalov@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Ajay Kaher <ajay.kaher@broadcom.com>
+M: Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
S: Supported
@@ -23637,34 +23697,34 @@ F: arch/x86/include/asm/vmware.h
F: arch/x86/kernel/cpu/vmware.c
VMWARE PVRDMA DRIVER
-M: Bryan Tan <bryantan@vmware.com>
-M: Vishnu Dasa <vdasa@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Bryan Tan <bryan-bt.tan@broadcom.com>
+M: Vishnu Dasa <vishnu.dasa@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/vmw_pvrdma/
VMWARE PVSCSI DRIVER
-M: Vishal Bhakta <vbhakta@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Vishal Bhakta <vishal.bhakta@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/vmw_pvscsi.c
F: drivers/scsi/vmw_pvscsi.h
VMWARE VIRTUAL PTP CLOCK DRIVER
-M: Jeff Sipek <jsipek@vmware.com>
-R: Ajay Kaher <akaher@vmware.com>
-R: Alexey Makhalov <amakhalov@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Nick Shi <nick.shi@broadcom.com>
+R: Ajay Kaher <ajay.kaher@broadcom.com>
+R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/ptp/ptp_vmw.c
VMWARE VMCI DRIVER
-M: Bryan Tan <bryantan@vmware.com>
-M: Vishnu Dasa <vdasa@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Bryan Tan <bryan-bt.tan@broadcom.com>
+M: Vishnu Dasa <vishnu.dasa@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: drivers/misc/vmw_vmci/
@@ -23679,16 +23739,16 @@ F: drivers/input/mouse/vmmouse.c
F: drivers/input/mouse/vmmouse.h
VMWARE VMXNET3 ETHERNET DRIVER
-M: Ronak Doshi <doshir@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Ronak Doshi <ronak.doshi@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/vmxnet3/
VMWARE VSOCK VMCI TRANSPORT DRIVER
-M: Bryan Tan <bryantan@vmware.com>
-M: Vishnu Dasa <vdasa@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Bryan Tan <bryan-bt.tan@broadcom.com>
+M: Vishnu Dasa <vishnu.dasa@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: net/vmw_vsock/vmci_transport*
@@ -23756,7 +23816,7 @@ S: Orphan
F: drivers/mmc/host/vub300.c
W1 DALLAS'S 1-WIRE BUS
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
S: Maintained
F: Documentation/devicetree/bindings/w1/
F: Documentation/w1/
diff --git a/Makefile b/Makefile
index 763b6792d3d5..40fb2ca6fe4c 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc6
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 9f066785bb71..30f7930275d8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -9,6 +9,14 @@
#
source "arch/$(SRCARCH)/Kconfig"
+config ARCH_CONFIGURES_CPU_MITIGATIONS
+ bool
+
+if !ARCH_CONFIGURES_CPU_MITIGATIONS
+config CPU_MITIGATIONS
+ def_bool y
+endif
+
menu "General architecture-dependent options"
config ARCH_HAS_SUBPAGE_FAULTS
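The new symbol is an opt-out: an architecture selects ARCH_CONFIGURES_CPU_MITIGATIONS when it wants to define CPU_MITIGATIONS itself, and every other arch inherits the unconditional def_bool y. On the consumer side this feeds the boot-time mitigation default; a minimal sketch modeled on kernel/cpu.c (treat the exact identifiers as assumptions):

enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
};

/* the Kconfig default decides whether mitigations start AUTO or OFF */
static enum cpu_mitigations cpu_mitigations __ro_after_init =
	IS_ENABLED(CONFIG_CPU_MITIGATIONS) ? CPU_MITIGATIONS_AUTO
					   : CPU_MITIGATIONS_OFF;

/* queried by arch code before applying Spectre-style workarounds */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}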
@@ -1172,12 +1180,12 @@ config PAGE_SIZE_LESS_THAN_256KB
config PAGE_SHIFT
int
- default 12 if PAGE_SIZE_4KB
- default 13 if PAGE_SIZE_8KB
- default 14 if PAGE_SIZE_16KB
- default 15 if PAGE_SIZE_32KB
- default 16 if PAGE_SIZE_64KB
- default 18 if PAGE_SIZE_256KB
+ default 12 if PAGE_SIZE_4KB
+ default 13 if PAGE_SIZE_8KB
+ default 14 if PAGE_SIZE_16KB
+ default 15 if PAGE_SIZE_32KB
+ default 16 if PAGE_SIZE_64KB
+ default 18 if PAGE_SIZE_256KB
# This allows to use a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
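The realignment above is whitespace-only; the meaning of the symbol is unchanged. PAGE_SHIFT is log2 of the page size, and the arch headers derive everything else from it, roughly:

/* illustrative derivation; 12 -> 4 KiB, 13 -> 8 KiB, ... 18 -> 256 KiB */
#define PAGE_SHIFT	CONFIG_PAGE_SHIFT
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))	/* e.g. ~0xfffUL for 4 KiB */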
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 99d2845f3feb..4092bec198be 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -6,7 +6,6 @@
config ARC
def_bool y
select ARC_TIMERS
- select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index 5648748c285f..5a8550124b73 100644
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-# uImage build relies on mkimage being availble on your host for ARC target
+# uImage build relies on mkimage being available on your host for ARC target
# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
-# and make sure it's reacable from your PATH
+# and make sure it's reachable from your PATH
OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 3434c8131ecd..c0a812674ce9 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -119,9 +119,9 @@
/*
* The DW APB ICTL intc on MB is connected to CPU intc via a
* DT "invisible" DW APB GPIO block, configured to simply pass thru
- * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
+ * interrupts - setup accordingly in platform init (plat-axs10x/ax10x.c)
*
- * So here we mimic a direct connection betwen them, ignoring the
+ * So here we mimic a direct connection between them, ignoring the
* ABPG GPIO. Thus set "interrupts = <24>" (DW APB GPIO to core)
* instead of "interrupts = <12>" (DW APB ICTL to DW APB GPIO)
*
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 6691f4255077..41b980df862b 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -205,7 +205,6 @@
};
gmac: ethernet@8000 {
- #interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
interrupts = <10>;
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index 90a412026e64..0e0e2d337bf8 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -113,7 +113,7 @@
/*
* Embedded Vision subsystem UIO mappings; only relevant for EV VDK
*
- * This node is intentionally put outside of MB above becase
+ * This node is intentionally put outside of MB above because
* it maps areas outside of MB's 0xez-0xfz.
*/
uio_ev: uio@d0000000 {
diff --git a/arch/arc/include/asm/cachetype.h b/arch/arc/include/asm/cachetype.h
deleted file mode 100644
index 05fc7ed59712..000000000000
--- a/arch/arc/include/asm/cachetype.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARC_CACHETYPE_H
-#define __ASM_ARC_CACHETYPE_H
-
-#include <linux/types.h>
-
-#define cpu_dcache_is_aliasing() true
-
-#endif
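With ARCH_HAS_CPU_CACHE_ALIASING deselected above, deleting asm/cachetype.h drops ARC back onto the generic answer, i.e. a non-aliasing D-cache. The dispatch, paraphrased from include/linux/cacheinfo.h (shape assumed):

#ifdef CONFIG_ARCH_HAS_CPU_CACHE_ALIASING
#include <asm/cachetype.h>		/* the arch supplies the answer */
#else
#define cpu_dcache_is_aliasing()	false	/* generic default */
#endif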
diff --git a/arch/arc/include/asm/dsp.h b/arch/arc/include/asm/dsp.h
index 202c78e56704..f496dbc4640b 100644
--- a/arch/arc/include/asm/dsp.h
+++ b/arch/arc/include/asm/dsp.h
@@ -12,7 +12,7 @@
/*
* DSP-related saved registers - need to be saved only when you are
* scheduled out.
- * structure fields name must correspond to aux register defenitions for
+ * structure field names must correspond to aux register definitions for
* automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
*/
struct dsp_callee_regs {
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 92c3e9f13252..00946fe04c9b 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -7,7 +7,7 @@
* Stack switching code can no longer reliably rely on the fact that
* if we are NOT in user mode, stack is switched to kernel mode.
* e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- * it's prologue including stack switching from user mode
+ * its prologue including stack switching from user mode
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
@@ -143,7 +143,7 @@
* 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
* 3. But before it could switch SP from USER to KERNEL stack
* a L2 IRQ "Interrupts" L1
- * Thay way although L2 IRQ happened in Kernel mode, stack is still
+ * That way although L2 IRQ happened in Kernel mode, stack is still
* not switched.
* To handle this, we may need to switch stack even if in kernel mode
* provided SP has values in range of USER mode stack ( < 0x7000_0000 )
@@ -173,7 +173,7 @@
GET_CURR_TASK_ON_CPU r9
- /* With current tsk in r9, get it's kernel mode stack base */
+ /* With current tsk in r9, get its kernel mode stack base */
GET_TSK_STACK_BASE r9, r9
/* save U mode SP @ pt_regs->sp */
@@ -282,7 +282,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro EXCEPTION_EPILOGUE
@@ -350,7 +350,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro INTERRUPT_EPILOGUE LVL
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index cf1ba376e992..38c35722cebf 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -7,7 +7,7 @@
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H
-#include <asm/unistd.h> /* For NR_syscalls defination */
+#include <asm/unistd.h> /* For NR_syscalls definition */
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
@@ -56,7 +56,7 @@
.endm
/*-------------------------------------------------------------
- * given a tsk struct, get to the base of it's kernel mode stack
+ * given a tsk struct, get to the base of its kernel mode stack
* tsk->thread_info is really a PAGE, whose bottom hoists stack
* which grows upwards towards thread_info
*------------------------------------------------------------*/
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index c574712ad865..9cd79263acba 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -10,7 +10,7 @@
* ARCv2 can support 240 interrupts in the core interrupts controllers and
* 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most
* configurations of boards.
- * This doesnt affect ARCompact, but we change it to same value
+ * This doesn't affect ARCompact, but we change it to the same value
*/
#define NR_IRQS 512
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index 0d63e568d64c..936a2f21f315 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -46,7 +46,7 @@
* IRQ Control Macros
*
* All of them have "memory" clobber (compiler barrier) which is needed to
- * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
* are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
*
* Noted at the time of Abilis Timer List corruption
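The clobber the comment defends is a full compiler barrier: anything the compiler cached in registers before IRQs were disabled must be reloaded afterwards. A minimal, ARC-agnostic illustration (function names invented):

static inline void compiler_barrier(void)
{
	__asm__ __volatile__("" : : : "memory");
}

/* without the barrier, the compiler may hoist the load of *flag
 * out of the loop and spin on a stale register copy */
static int wait_for_flag(int *flag)
{
	while (!*flag)
		compiler_barrier();	/* forces *flag to be re-read */
	return *flag;
}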
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index dda471f5f05b..9963bb1a5733 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -165,7 +165,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) = >deactive_mm( ) and
* mmput => .. => __mmdrop( ) => destroy_context( )
- * there is a good chance that task gets sched-out/in, making it's ASID valid
+ * there is a good chance that task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index f3eea3f30b2e..8ebec1b21d24 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -66,7 +66,7 @@
* Other rules which cause the divergence from 1:1 mapping
*
* 1. Although ARC700 can do exclusive execute/write protection (meaning R
- * can be tracked independet of X/W unlike some other CPUs), still to
+ * can be tracked independently of X/W unlike some other CPUs), still to
* keep things consistent with other archs:
* -Write implies Read: W => R
* -Execute implies Read: X => R
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 00b9318e551e..cf79df0b2570 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -169,7 +169,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
-extern int syscall_trace_entry(struct pt_regs *);
+extern int syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_exit(struct pt_regs *);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
index 8b0251464ffd..719112af0f41 100644
--- a/arch/arc/include/asm/shmparam.h
+++ b/arch/arc/include/asm/shmparam.h
@@ -6,7 +6,7 @@
#ifndef __ARC_ASM_SHMPARAM_H
#define __ARC_ASM_SHMPARAM_H
-/* Handle upto 2 cache bins */
+/* Handle up to 2 cache bins */
#define SHMLBA (2 * PAGE_SIZE)
/* Enforce SHMLBA in shmat */
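SHMLBA being 2 * PAGE_SIZE matters because do_shmat() refuses or rounds attach addresses against it; a sketch of that check (helper name invented):

static int shmat_align_ok(unsigned long *addr, unsigned long shmlba,
			  int shmflg)
{
	if (*addr & (shmlba - 1)) {
		if (!(shmflg & SHM_RND))
			return -EINVAL;		/* unaligned attach refused */
		*addr &= ~(shmlba - 1);		/* SHM_RND: round down */
	}
	return 0;
}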
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index e0913f52c2cd..990f834909f0 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -77,7 +77,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
- * Originally Interrupts had to be disabled around code to gaurantee atomicity.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
* The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
* based on retry-if-irq-in-atomic (with hardware assist).
* However despite these, we provide the IRQ disabling variant
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
- * gaurantted by the platform (not something which core handles).
+ * guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
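The LLOCK/SCOND pairing described above is the classic load-locked/store-conditional retry loop. Modeled on arch/arc/include/asm/atomic.h (the constraints are a best-effort reconstruction, not a verbatim copy):

static inline void arc_atomic_add(int i, atomic_t *v)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock   %[val], [%[ctr]]	\n"	/* load-locked */
	"	add     %[val], %[val], %[i]	\n"
	"	scond   %[val], [%[ctr]]	\n"	/* store iff still exclusive */
	"	bnz     1b			\n"	/* reservation lost: retry */
	: [val] "=&r" (val)
	: [ctr] "r" (&v->counter), [i] "ir" (i)
	: "cc");
}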
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 4c530cf131f3..12daaf3a61ea 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -38,7 +38,7 @@
struct thread_info {
unsigned long flags; /* low level flags */
unsigned long ksp; /* kernel mode stack top in __switch_to */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
+ int preempt_count; /* 0 => preemptible, <0 => BUG */
int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
struct task_struct *task; /* main task structure */
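The preempt_count semantics fixed in the comment are: 0 means the task may be preempted, a positive count means some nested context forbids it, and a negative value can only result from unbalanced enables, hence the BUG marker. Schematically (schedule_if_needed() is a made-up stand-in for the real resched check):

static inline void preempt_disable(void)
{
	current_thread_info()->preempt_count++;	/* now > 0: no preemption */
	barrier();
}

static inline void preempt_enable(void)
{
	barrier();
	if (--current_thread_info()->preempt_count == 0)
		schedule_if_needed();		/* hypothetical resched hook */
}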
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 02109cd48ee1..8d1f1ef44ba7 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -62,7 +62,7 @@
* 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
*
* Joern suggested a better "C" algorithm which is great since
- * (1) It is portable to any architecure
+ * (1) It is portable to any architecture
* (2) At the same time it takes advantage of ARC ISA (rotate intrns)
*/
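The portable algorithm the comment credits is the plain mask-and-shift swap; compilers lower it to rotate instructions where the ISA offers them:

/* the ___constant_swab32() pattern, portable to any architecture */
static inline unsigned int swab32(unsigned int x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);
}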
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 2e49c81c8086..e238b5fd3c8c 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -5,7 +5,7 @@
* Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
*/
-#include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */
+#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
#include <asm/errno.h>
#include <asm/arcregs.h>
@@ -31,7 +31,7 @@ VECTOR res_service ; Reset Vector
VECTOR mem_service ; Mem exception
VECTOR instr_service ; Instrn Error
VECTOR EV_MachineCheck ; Fatal Machine check
-VECTOR EV_TLBMissI ; Intruction TLB miss
+VECTOR EV_TLBMissI ; Instruction TLB miss
VECTOR EV_TLBMissD ; Data TLB miss
VECTOR EV_TLBProtV ; Protection Violation
VECTOR EV_PrivilegeV ; Privilege Violation
@@ -76,11 +76,11 @@ ENTRY(handle_interrupt)
# query in hard ISR path would return false (since .IE is set) which would
# trips genirq interrupt handling asserts.
#
- # So do a "soft" disable of interrutps here.
+ # So do a "soft" disable of interrupts here.
#
# Note this disable is only for consistent book-keeping as further interrupts
# will be disabled anyways even w/o this. Hardware tracks active interrupts
- # seperately in AUX_IRQ_ACT.active and will not take new interrupts
+ # separately in AUX_IRQ_ACT.active and will not take new interrupts
# unless this one returns (or higher prio becomes pending in 2-prio scheme)
IRQ_DISABLE
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 089f6680518f..3c7e74aba679 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -95,7 +95,7 @@ ENTRY(EV_MachineCheck)
lr r0, [efa]
mov r1, sp
- ; MC excpetions disable MMU
+ ; MC exceptions disable MMU
ARC_MMU_REENABLE r3
lsr r3, r10, 8
@@ -209,7 +209,7 @@ trap_with_param:
; ---------------------------------------------
; syscall TRAP
-; ABI: (r0-r7) upto 8 args, (r8) syscall number
+; ABI: (r0-r7) up to 8 args, (r8) syscall number
; ---------------------------------------------
ENTRY(EV_Trap)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 9152782444b5..8d541f53fae3 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -165,7 +165,7 @@ ENTRY(first_lines_of_secondary)
; setup stack (fp, sp)
mov fp, 0
- ; set it's stack base to tsk->thread_info bottom
+ ; set its stack base to tsk->thread_info bottom
GET_TSK_STACK_BASE r0, sp
j start_kernel_secondary
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 678898757e47..f324f0e3341a 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -56,7 +56,7 @@ void arc_init_IRQ(void)
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
/*
- * ARCv2 core intc provides multiple interrupt priorities (upto 16).
+ * ARCv2 core intc provides multiple interrupt priorities (up to 16).
* Typical builds though have only two levels (0-high, 1-low)
* Linux by default uses lower prio 1 for most irqs, reserving 0 for
* NMI style interrupts in future (say perf)
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index e71d64119d71..f8e2960832d9 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -190,7 +190,8 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
}
}
-int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+static int
+__kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
@@ -241,8 +242,8 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
return 0;
}
-static int __kprobes arc_post_kprobe_handler(unsigned long addr,
- struct pt_regs *regs)
+static int
+__kprobes arc_post_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index adff957962da..6e5a651cd75c 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -38,7 +38,7 @@
* (based on a specific RTL build)
* Below is the static map between perf generic/arc specific event_id and
* h/w condition names.
- * At the time of probe, we loop thru each index and find it's name to
+ * At the time of probe, we loop thru each index and find its name to
* complete the mapping of perf event_id to h/w index as latter is needed
* to program the counter really
*/
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index d08a5092c2b4..7b6a9beba9db 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -390,7 +390,7 @@ static void arc_chk_core_config(struct cpuinfo_arc *info)
#ifdef CONFIG_ARC_HAS_DCCM
/*
* DCCM can be arbit placed in hardware.
- * Make sure it's placement/sz matches what Linux is built with
+ * Make sure its placement/sz matches what Linux is built with
*/
if ((unsigned int)__arc_dccm_base != info->dccm.base)
panic("Linux built with incorrect DCCM Base address\n");
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 8f6f4a542964..fefa705a8638 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -8,15 +8,16 @@
*
* vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
* -do_signal() supports TIF_RESTORE_SIGMASK
- * -do_signal() no loner needs oldset, required by OLD sys_sigsuspend
- * -sys_rt_sigsuspend() now comes from generic code, so discard arch implemen
+ * -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ * -sys_rt_sigsuspend() now comes from generic code, so discard arch
+ * implementation
* -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
* -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
* the job to do_signal()
*
* vineetg: July 2009
* -Modified Code to support the uClibc provided userland sigreturn stub
- * to avoid kernel synthesing it on user stack at runtime, costing TLB
+ * to avoid kernel synthesizing it on user stack at runtime, costing TLB
* probes and Cache line flushes.
*
* vineetg: July 2009
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 9b9570b79362..a19751e824fb 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -89,7 +89,7 @@ int do_misaligned_access(unsigned long address, struct pt_regs *regs,
/*
* Entry point for miscll errors such as Nested Exceptions
- * -Duplicate TLB entry is handled seperately though
+ * -Duplicate TLB entry is handled separately though
*/
void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
{
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 549c3f407918..61a1b2b96e1d 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -41,8 +41,8 @@ SECTIONS
#endif
/*
- * The reason for having a seperate subsection .init.ramfs is to
- * prevent objump from including it in kernel dumps
+ * The reason for having a separate subsection .init.ramfs is to
+ * prevent objdump from including it in kernel dumps
*
* Reason for having .init.ramfs above .init is to make sure that the
* binary blob is tucked away to one side, reducing the displacement
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index ad702b49aeb3..cae4a7aae0ed 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -212,7 +212,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long flags;
/* If range @start to @end is more than 32 TLB entries deep,
- * its better to move to a new ASID rather than searching for
+ * it's better to move to a new ASID rather than searching for
* individual entries and then shooting them down
*
* The calc above is rough, doesn't account for unaligned parts,
@@ -408,7 +408,7 @@ static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *p
* -More importantly it makes this handler inconsistent with fast-path
* TLB Refill handler which always deals with "current"
*
- * Lets see the use cases when current->mm != vma->mm and we land here
+ * Let's see the use cases when current->mm != vma->mm and we land here
* 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
* Here VM wants to pre-install a TLB entry for user stack while
* current->mm still points to pre-execve mm (hence the condition).
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index e054780a8fe0..dc65e87a531f 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -5,19 +5,19 @@
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* Vineetg: April 2011 :
- * -MMU v1: moved out legacy code into a seperate file
+ * -MMU v1: moved out legacy code into a separate file
* -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
* helps avoid a shift when preparing PD0 from PTE
*
* Vineetg: July 2009
- * -For MMU V2, we need not do heuristics at the time of commiting a D-TLB
- * entry, so that it doesn't knock out it's I-TLB entry
+ * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ * entry, so that it doesn't knock out its I-TLB entry
* -Some more fine tuning:
* bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
*
* Vineetg: July 2009
* -Practically rewrote the I/D TLB Miss handlers
- * Now 40 and 135 instructions a peice as compared to 131 and 449 resp.
+ * Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
* Hence Leaner by 1.5 K
* Used Conditional arithmetic to replace excessive branching
* Also used short instructions wherever possible
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
index 4f609e9e510e..009d2c832421 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
@@ -242,7 +242,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1150000>;
+ regulator-suspend-microvolt = <1150000>;
regulator-mode = <4>;
};
@@ -263,7 +263,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1050000>;
+ regulator-suspend-microvolt = <1050000>;
regulator-mode = <4>;
};
@@ -280,7 +280,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
@@ -296,7 +296,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <3300000>;
+ regulator-suspend-microvolt = <3300000>;
regulator-on-in-suspend;
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
index 217e9b96c61e..20b2497657ae 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
@@ -293,7 +293,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1150000>;
+ regulator-suspend-microvolt = <1150000>;
regulator-mode = <4>;
};
@@ -314,7 +314,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1050000>;
+ regulator-suspend-microvolt = <1050000>;
regulator-mode = <4>;
};
@@ -331,7 +331,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
@@ -346,7 +346,7 @@
regulator-max-microvolt = <3700000>;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
index 3fdece5bd31f..5248a058230c 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
@@ -805,6 +805,7 @@
&pinctrl_usb_pwr>;
dr_mode = "host";
power-active-high;
+ over-current-active-low;
disable-over-current;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
index 1235a71c6abe..52869e68f833 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
@@ -666,7 +666,7 @@
bus-width = <4>;
no-1-8-v;
no-sdio;
- no-emmc;
+ no-mmc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
index ba7231b364bb..7bab113ca6da 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
@@ -210,6 +210,7 @@
remote-endpoint = <&mipi_from_sensor>;
clock-lanes = <0>;
data-lanes = <1>;
+ link-frequencies = /bits/ 64 <330000000>;
};
};
};
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
new file mode 100644
index 000000000000..2189e507c8e0
--- /dev/null
+++ b/arch/arm/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <asm/system_info.h>
+#include <uapi/asm/mman.h>
+
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return cpu_architecture() >= CPU_ARCH_ARMv6;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+#endif /* __ASM_MMAN_H__ */
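The new arm header above gates the generic memory-deny-write-execute (MDWE) prctl on ARMv6+, since earlier cores lack the execute-never page permission. A minimal userspace sketch of the observable behaviour; the fallback constant values below match the uapi header as of Linux 6.3 but should be treated as assumptions and checked against <linux/prctl.h>:

/* Minimal sketch, not from the patch: probe MDWE support from userspace.
 * On pre-ARMv6 hardware the hook above should make this fail cleanly
 * with -EINVAL instead of silently enabling an unenforceable policy. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>

#ifndef PR_SET_MDWE
#define PR_SET_MDWE			65		/* assumed uapi value */
#define PR_MDWE_REFUSE_EXEC_GAIN	(1UL << 0)	/* assumed uapi value */
#endif

int main(void)
{
	if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L) == 0)
		puts("MDWE on: mappings may never gain write+execute");
	else
		printf("MDWE unsupported: %s\n", strerror(errno));
	return 0;
}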
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 31755a378c73..ff2a4a4d8220 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -79,10 +79,8 @@ static struct musb_hdrc_platform_data tusb_data = {
static struct gpiod_lookup_table tusb_gpio_table = {
.dev_id = "musb-tusb",
.table = {
- GPIO_LOOKUP("gpio-0-15", 0, "enable",
- GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-48-63", 10, "int",
- GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-0-31", 0, "enable", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-32-63", 26, "int", GPIO_ACTIVE_HIGH),
{ }
},
};
@@ -140,12 +138,11 @@ static int slot1_cover_open;
static int slot2_cover_open;
static struct device *mmc_device;
-static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
+static struct gpiod_lookup_table nokia800_mmc_gpio_table = {
.dev_id = "mmci-omap.0",
.table = {
/* Slot switch, GPIO 96 */
- GPIO_LOOKUP("gpio-80-111", 16,
- "switch", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
{ }
},
};
@@ -153,12 +150,12 @@ static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
static struct gpiod_lookup_table nokia810_mmc_gpio_table = {
.dev_id = "mmci-omap.0",
.table = {
+ /* Slot switch, GPIO 96 */
+ GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
/* Slot index 1, VSD power, GPIO 23 */
- GPIO_LOOKUP_IDX("gpio-16-31", 7,
- "vsd", 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-0-31", 23, "vsd", 1, GPIO_ACTIVE_HIGH),
/* Slot index 1, VIO power, GPIO 9 */
- GPIO_LOOKUP_IDX("gpio-0-15", 9,
- "vio", 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-0-31", 9, "vio", 1, GPIO_ACTIVE_HIGH),
{ }
},
};
@@ -415,8 +412,6 @@ static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];
static void __init n8x0_mmc_init(void)
{
- gpiod_add_lookup_table(&nokia8xx_mmc_gpio_table);
-
if (board_is_n810()) {
mmc1_data.slots[0].name = "external";
@@ -429,6 +424,8 @@ static void __init n8x0_mmc_init(void)
mmc1_data.slots[1].name = "internal";
mmc1_data.slots[1].ban_openended = 1;
gpiod_add_lookup_table(&nokia810_mmc_gpio_table);
+ } else {
+ gpiod_add_lookup_table(&nokia800_mmc_gpio_table);
}
mmc1_data.nr_slots = 2;
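For readers untangling the lookup-table shuffle above: GPIO_LOOKUP() keys on a gpiochip label plus an offset within that chip, and the diff suggests the OMAP banks are labelled 32 lines wide, so global GPIO 96 becomes offset 0 on "gpio-96-127" rather than offset 16 on a 16-wide label. A hedged consumer-side sketch of how such an entry is resolved; example_probe() and its device are hypothetical:

/* Illustrative only: gpiod_get() matches the table's .dev_id against
 * the consumer device and con_id ("switch") against the entry, then
 * resolves (chip label, offset-within-chip) to a descriptor. */
#include <linux/gpio/consumer.h>

static int example_probe(struct device *dev)	/* hypothetical consumer */
{
	struct gpio_desc *sw = gpiod_get(dev, "switch", GPIOD_OUT_LOW);

	if (IS_ERR(sw))
		return PTR_ERR(sw);
	gpiod_set_value_cansleep(sw, 1);	/* e.g. select the MMC slot */
	return 0;
}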
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
index 3c42240e78e2..4aaf5a0c1ed8 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
@@ -41,7 +41,7 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
fsl,usbphy = <&usbphy1>;
fsl,usbmisc = <&usbmisc1 0>;
- clocks = <&usb2_lpcg 0>;
+ clocks = <&usb2_lpcg IMX_LPCG_CLK_6>;
ahb-burst-config = <0x0>;
tx-burst-size-dword = <0x10>;
rx-burst-size-dword = <0x10>;
@@ -58,7 +58,7 @@ conn_subsys: bus@5b000000 {
usbphy1: usbphy@5b100000 {
compatible = "fsl,imx7ulp-usbphy";
reg = <0x5b100000 0x1000>;
- clocks = <&usb2_lpcg 1>;
+ clocks = <&usb2_lpcg IMX_LPCG_CLK_7>;
power-domains = <&pd IMX_SC_R_USB_0_PHY>;
status = "disabled";
};
@@ -67,8 +67,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b010000 0x10000>;
clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
- <&sdhc0_lpcg IMX_LPCG_CLK_0>,
- <&sdhc0_lpcg IMX_LPCG_CLK_5>;
+ <&sdhc0_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_0>;
status = "disabled";
@@ -78,8 +78,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b020000 0x10000>;
clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
- <&sdhc1_lpcg IMX_LPCG_CLK_0>,
- <&sdhc1_lpcg IMX_LPCG_CLK_5>;
+ <&sdhc1_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc1_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_1>;
fsl,tuning-start-tap = <20>;
@@ -91,8 +91,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b030000 0x10000>;
clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
- <&sdhc2_lpcg IMX_LPCG_CLK_0>,
- <&sdhc2_lpcg IMX_LPCG_CLK_5>;
+ <&sdhc2_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc2_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_2>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
index cab3468b1875..f7a91d43a0ff 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
@@ -28,8 +28,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&spi0_lpcg 0>,
- <&spi0_lpcg 1>;
+ clocks = <&spi0_lpcg IMX_LPCG_CLK_0>,
+ <&spi0_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@@ -44,8 +44,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&spi1_lpcg 0>,
- <&spi1_lpcg 1>;
+ clocks = <&spi1_lpcg IMX_LPCG_CLK_0>,
+ <&spi1_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@@ -60,8 +60,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&spi2_lpcg 0>,
- <&spi2_lpcg 1>;
+ clocks = <&spi2_lpcg IMX_LPCG_CLK_0>,
+ <&spi2_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@@ -76,8 +76,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&spi3_lpcg 0>,
- <&spi3_lpcg 1>;
+ clocks = <&spi3_lpcg IMX_LPCG_CLK_0>,
+ <&spi3_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@@ -145,8 +145,8 @@ dma_subsys: bus@5a000000 {
compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
reg = <0x5a190000 0x1000>;
interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_pwm_lpcg 1>,
- <&adma_pwm_lpcg 0>;
+ clocks = <&adma_pwm_lpcg IMX_LPCG_CLK_4>,
+ <&adma_pwm_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_LCD_0_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
@@ -355,8 +355,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a880000 0x10000>;
interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&adc0_lpcg 0>,
- <&adc0_lpcg 1>;
+ clocks = <&adc0_lpcg IMX_LPCG_CLK_0>,
+ <&adc0_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_ADC_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
@@ -370,8 +370,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a890000 0x10000>;
interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&adc1_lpcg 0>,
- <&adc1_lpcg 1>;
+ clocks = <&adc1_lpcg IMX_LPCG_CLK_0>,
+ <&adc1_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_ADC_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
@@ -384,8 +384,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a8d0000 0x10000>;
interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&can0_lpcg 1>,
- <&can0_lpcg 0>;
+ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
+ <&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;
@@ -405,8 +405,8 @@ dma_subsys: bus@5a000000 {
* CAN1 shares CAN0's clock and to enable CAN0's clock it
* has to be powered on.
*/
- clocks = <&can0_lpcg 1>,
- <&can0_lpcg 0>;
+ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
+ <&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;
@@ -426,8 +426,8 @@ dma_subsys: bus@5a000000 {
* CAN2 shares CAN0's clock and to enable CAN0's clock it
* has to be powered on.
*/
- clocks = <&can0_lpcg 1>,
- <&can0_lpcg 0>;
+ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
+ <&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
index 7e510b21bbac..764c1a08e3b1 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
@@ -25,8 +25,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d000000 0x10000>;
clock-names = "ipg", "per";
- clocks = <&pwm0_lpcg 4>,
- <&pwm0_lpcg 1>;
+ clocks = <&pwm0_lpcg IMX_LPCG_CLK_6>,
+ <&pwm0_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@@ -38,8 +38,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d010000 0x10000>;
clock-names = "ipg", "per";
- clocks = <&pwm1_lpcg 4>,
- <&pwm1_lpcg 1>;
+ clocks = <&pwm1_lpcg IMX_LPCG_CLK_6>,
+ <&pwm1_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@@ -51,8 +51,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d020000 0x10000>;
clock-names = "ipg", "per";
- clocks = <&pwm2_lpcg 4>,
- <&pwm2_lpcg 1>;
+ clocks = <&pwm2_lpcg IMX_LPCG_CLK_6>,
+ <&pwm2_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@@ -64,8 +64,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d030000 0x10000>;
clock-names = "ipg", "per";
- clocks = <&pwm3_lpcg 4>,
- <&pwm3_lpcg 1>;
+ clocks = <&pwm3_lpcg IMX_LPCG_CLK_6>,
+ <&pwm3_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
index 41c79d2ebdd6..f24b14744799 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
@@ -14,6 +14,7 @@
pinctrl-0 = <&pinctrl_usbcon1>;
type = "micro";
label = "otg";
+ vbus-supply = <&reg_usb1_vbus>;
id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
port {
@@ -183,7 +184,6 @@
};
&usb3_phy0 {
- vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
index d5c400b355af..f5491a608b2f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
@@ -14,6 +14,7 @@
pinctrl-0 = <&pinctrl_usbcon1>;
type = "micro";
label = "otg";
+ vbus-supply = <&reg_usb1_vbus>;
id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
port {
@@ -202,7 +203,6 @@
};
&usb3_phy0 {
- vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index bfc5c81a5bd4..8141926e4ef1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -1672,7 +1672,7 @@
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT>,
<&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
clock-names = "pclk", "wrap", "phy", "axi";
- assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM1_PIX>,
+ assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM2_PIX>,
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
<&clk IMX8MP_CLK_24M>;
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
index 11626fae5f97..aa9f28c4431d 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
@@ -153,15 +153,15 @@
};
&flexcan2 {
- clocks = <&can1_lpcg 1>,
- <&can1_lpcg 0>;
+ clocks = <&can1_lpcg IMX_LPCG_CLK_4>,
+ <&can1_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_1 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};
&flexcan3 {
- clocks = <&can2_lpcg 1>,
- <&can2_lpcg 0>;
+ clocks = <&can2_lpcg IMX_LPCG_CLK_4>,
+ <&can2_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_2 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index 0c38f7b51763..234e3b23d7a8 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -129,7 +129,7 @@
};
&pio {
- eth_default: eth_default {
+ eth_default: eth-default-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
@@ -156,7 +156,7 @@
};
};
- eth_sleep: eth_sleep {
+ eth_sleep: eth-sleep-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
@@ -182,14 +182,14 @@
};
};
- usb0_id_pins_float: usb0_iddig {
+ usb0_id_pins_float: usb0-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
bias-pull-up;
};
};
- usb1_id_pins_float: usb1_iddig {
+ usb1_id_pins_float: usb1-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
bias-pull-up;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 6d218caa198c..082672efba0a 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -249,10 +249,11 @@
#clock-cells = <1>;
};
- infracfg: syscon@10001000 {
+ infracfg: clock-controller@10001000 {
compatible = "mediatek,mt2712-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
pericfg: syscon@10003000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 3ee9266fa8e9..917fa39a74f8 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -252,7 +252,7 @@
clock-names = "hif_sel";
};
- cir: cir@10009000 {
+ cir: ir-receiver@10009000 {
compatible = "mediatek,mt7622-cir";
reg = <0 0x10009000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
@@ -283,16 +283,14 @@
};
};
- apmixedsys: apmixedsys@10209000 {
- compatible = "mediatek,mt7622-apmixedsys",
- "syscon";
+ apmixedsys: clock-controller@10209000 {
+ compatible = "mediatek,mt7622-apmixedsys";
reg = <0 0x10209000 0 0x1000>;
#clock-cells = <1>;
};
- topckgen: topckgen@10210000 {
- compatible = "mediatek,mt7622-topckgen",
- "syscon";
+ topckgen: clock-controller@10210000 {
+ compatible = "mediatek,mt7622-topckgen";
reg = <0 0x10210000 0 0x1000>;
#clock-cells = <1>;
};
@@ -515,7 +513,6 @@
<&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "therm", "auxadc";
resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
- reset-names = "therm";
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
@@ -734,9 +731,8 @@
power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
};
- ssusbsys: ssusbsys@1a000000 {
- compatible = "mediatek,mt7622-ssusbsys",
- "syscon";
+ ssusbsys: clock-controller@1a000000 {
+ compatible = "mediatek,mt7622-ssusbsys";
reg = <0 0x1a000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -793,9 +789,8 @@
};
};
- pciesys: pciesys@1a100800 {
- compatible = "mediatek,mt7622-pciesys",
- "syscon";
+ pciesys: clock-controller@1a100800 {
+ compatible = "mediatek,mt7622-pciesys";
reg = <0 0x1a100800 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -921,12 +916,13 @@
};
};
- hifsys: syscon@1af00000 {
- compatible = "mediatek,mt7622-hifsys", "syscon";
+ hifsys: clock-controller@1af00000 {
+ compatible = "mediatek,mt7622-hifsys";
reg = <0 0x1af00000 0 0x70>;
+ #clock-cells = <1>;
};
- ethsys: syscon@1b000000 {
+ ethsys: clock-controller@1b000000 {
compatible = "mediatek,mt7622-ethsys",
"syscon";
reg = <0 0x1b000000 0 0x1000>;
@@ -966,9 +962,7 @@
};
eth: ethernet@1b100000 {
- compatible = "mediatek,mt7622-eth",
- "mediatek,mt2701-eth",
- "syscon";
+ compatible = "mediatek,mt7622-eth";
reg = <0 0x1b100000 0 0x20000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
index e04b1c0c0ebb..ed79ad1ae871 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
@@ -146,19 +146,19 @@
&cpu_thermal {
cooling-maps {
- cpu-active-high {
+ map-cpu-active-high {
/* active: set fan to cooling level 2 */
cooling-device = <&fan 2 2>;
trip = <&cpu_trip_active_high>;
};
- cpu-active-med {
+ map-cpu-active-med {
/* active: set fan to cooling level 1 */
cooling-device = <&fan 1 1>;
trip = <&cpu_trip_active_med>;
};
- cpu-active-low {
+ map-cpu-active-low {
/* active: set fan to cooling level 0 */
cooling-device = <&fan 0 0>;
trip = <&cpu_trip_active_low>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index b3f416b9a7a4..559990dcd1d1 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -332,9 +332,8 @@
reg = <0 0x1100c800 0 0x800>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&infracfg CLK_INFRA_THERM_CK>,
- <&infracfg CLK_INFRA_ADC_26M_CK>,
- <&infracfg CLK_INFRA_ADC_FRC_CK>;
- clock-names = "therm", "auxadc", "adc_32k";
+ <&infracfg CLK_INFRA_ADC_26M_CK>;
+ clock-names = "therm", "auxadc";
nvmem-cells = <&thermal_calibration>;
nvmem-cell-names = "calibration-data";
#thermal-sensor-cells = <1>;
@@ -492,8 +491,6 @@
compatible = "mediatek,mt7986-ethsys",
"syscon";
reg = <0 0x15000000 0 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
#clock-cells = <1>;
#reset-cells = <1>;
};
@@ -556,7 +553,6 @@
<&topckgen CLK_TOP_SGM_325M_SEL>;
assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
<&apmixedsys CLK_APMIXED_SGMPLL>;
- #reset-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
mediatek,ethsys = <&ethsys>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 6bd7424ef66c..100191c6453b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -433,7 +433,6 @@
};
&mt6358_vgpu_reg {
- regulator-min-microvolt = <625000>;
regulator-max-microvolt = <900000>;
regulator-coupled-with = <&mt6358_vsram_gpu_reg>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 93dfbf130231..774ae5d9143f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1637,6 +1637,7 @@
compatible = "mediatek,mt8183-mfgcfg", "syscon";
reg = <0 0x13000000 0 0x1000>;
#clock-cells = <1>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_MFG_ASYNC>;
};
gpu: gpu@13040000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
index 3dea28f1d806..1807e9d6cb0e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
@@ -1296,7 +1296,7 @@
* regulator coupling requirements.
*/
regulator-name = "ppvar_dvdd_vgpu";
- regulator-min-microvolt = <600000>;
+ regulator-min-microvolt = <500000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <6250>;
regulator-enable-ramp-delay = <200>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index 9b738f6a5d21..7a704246678f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -1421,7 +1421,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@@ -1431,7 +1431,7 @@
mt6315_6_vbuck3: vbuck3 {
regulator-compatible = "vbuck3";
regulator-name = "Vlcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@@ -1448,7 +1448,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
- regulator-min-microvolt = <606250>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <800000>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
index 05e401670bce..84cbdf6e9eb0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
@@ -1464,6 +1464,7 @@
reg = <0 0x14001000 0 0x1000>;
interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&mmsys CLK_MM_DISP_MUTEX0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0>,
<CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1>;
power-domains = <&spm MT8192_POWER_DOMAIN_DISP>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
index f94c07f8b933..4a11918da370 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
@@ -264,6 +264,38 @@
status = "okay";
};
+&cpu0 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu1 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu2 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu3 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu4 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu5 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu6 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu7 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
&dp_intf0 {
status = "okay";
@@ -1214,7 +1246,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
@@ -1232,7 +1264,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
- regulator-min-microvolt = <625000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index ea6dc220e1cc..5d8b68f86ce4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -2028,6 +2028,7 @@
compatible = "mediatek,mt8195-vppsys0", "syscon";
reg = <0 0x14000000 0 0x1000>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0 0x1000>;
};
dma-controller@14001000 {
@@ -2251,6 +2252,7 @@
compatible = "mediatek,mt8195-vppsys1", "syscon";
reg = <0 0x14f00000 0 0x1000>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0 0x1000>;
};
mutex@14f01000 {
@@ -3080,6 +3082,7 @@
reg = <0 0x1c01a000 0 0x1000>;
mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0xa000 0x1000>;
};
@@ -3261,6 +3264,7 @@
interrupts = <GIC_SPI 658 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_MUTEX0>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0x6000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0>;
};
@@ -3331,6 +3335,7 @@
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
clocks = <&vdosys1 CLK_VDO1_DISP_MUTEX>;
clock-names = "vdo1_mutex";
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
index f3a6da8b2890..5260c63db007 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
@@ -944,6 +944,8 @@ ap_spi_fp: &spi10 {
vddrf-supply = <&pp1300_l2c>;
vddch0-supply = <&pp3300_l10c>;
max-speed = <3200000>;
+
+ qcom,local-bd-address-broken;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 7e7f0f0fb41b..41f51d326111 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -3707,7 +3707,7 @@
compatible = "qcom,sc7280-adsp-pas";
reg = <0 0x03700000 0 0x100>;
- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -3944,7 +3944,7 @@
compatible = "qcom,sc7280-cdsp-pas";
reg = <0 0x0a300000 0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index 32afc78d5b76..053f7861c3ce 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -2701,7 +2701,7 @@
resets = <&gcc GCC_USB30_SEC_BCR>;
power-domains = <&gcc USB30_SEC_GDSC>;
interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 7 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 40 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 10 IRQ_TYPE_EDGE_BOTH>,
<&pdc 11 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index a5b194813079..d0f82e12289e 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -1774,6 +1774,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_4_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie4_phy>;
phy-names = "pciephy";
@@ -1872,6 +1873,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_3B_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie3b_phy>;
phy-names = "pciephy";
@@ -1970,6 +1972,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_3A_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie3a_phy>;
phy-names = "pciephy";
@@ -2071,6 +2074,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_2B_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie2b_phy>;
phy-names = "pciephy";
@@ -2169,6 +2173,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_2A_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie2a_phy>;
phy-names = "pciephy";
@@ -2641,7 +2646,7 @@
compatible = "qcom,sc8280xp-adsp-pas";
reg = <0 0x03000000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -4977,7 +4982,7 @@
compatible = "qcom,sc8280xp-nsp0-pas";
reg = <0 0x1b300000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -5108,7 +5113,7 @@
compatible = "qcom,sc8280xp-nsp1-pas";
reg = <0 0x21300000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
index 24bcec3366ef..0be053555602 100644
--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
@@ -1252,7 +1252,7 @@
compatible = "qcom,sm6350-adsp-pas";
reg = <0 0x03000000 0 0x100>;
- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -1511,7 +1511,7 @@
compatible = "qcom,sm6350-cdsp-pas";
reg = <0 0x08300000 0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
index 4386f8a9c636..f40509d91bbd 100644
--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
@@ -1561,7 +1561,7 @@
compatible = "qcom,sm6375-adsp-pas";
reg = <0 0x0a400000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 39bd8f0eba1e..7f2333c9d17d 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -3062,7 +3062,7 @@
compatible = "qcom,sm8250-slpi-pas";
reg = <0 0x05c00000 0 0x4000>;
- interrupts-extended = <&pdc 9 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 9 IRQ_TYPE_EDGE_RISING>,
<&smp2p_slpi_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_slpi_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_slpi_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -3766,7 +3766,7 @@
compatible = "qcom,sm8250-cdsp-pas";
reg = <0 0x08300000 0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -5928,7 +5928,7 @@
compatible = "qcom,sm8250-adsp-pas";
reg = <0 0x17300000 0 0x100>;
- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index b86be34a912b..024d2653cc30 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -1777,12 +1777,8 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
<0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
- /*
- * MSIs for BDF (1:0.0) only works with Device ID 0x5980.
- * Hence, the IDs are swapped.
- */
- msi-map = <0x0 &gic_its 0x5981 0x1>,
- <0x100 &gic_its 0x5980 0x1>;
+ msi-map = <0x0 &gic_its 0x5980 0x1>,
+ <0x100 &gic_its 0x5981 0x1>;
msi-map-mask = <0xff00>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
@@ -1900,12 +1896,8 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
<0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
- /*
- * MSIs for BDF (1:0.0) only works with Device ID 0x5a00.
- * Hence, the IDs are swapped.
- */
- msi-map = <0x0 &gic_its 0x5a01 0x1>,
- <0x100 &gic_its 0x5a00 0x1>;
+ msi-map = <0x0 &gic_its 0x5a00 0x1>,
+ <0x100 &gic_its 0x5a01 0x1>;
msi-map-mask = <0xff00>;
interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index 3904348075f6..3348bc06db48 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -1755,9 +1755,8 @@
<&gem_noc MASTER_APPSS_PROC 0 &cnoc_main SLAVE_PCIE_0 0>;
interconnect-names = "pcie-mem", "cpu-pcie";
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1401 0x1>,
- <0x100 &gic_its 0x1400 0x1>;
+ msi-map = <0x0 &gic_its 0x1400 0x1>,
+ <0x100 &gic_its 0x1401 0x1>;
iommu-map = <0x0 &apps_smmu 0x1400 0x1>,
<0x100 &apps_smmu 0x1401 0x1>;
@@ -1867,9 +1866,8 @@
<&gem_noc MASTER_APPSS_PROC 0 &cnoc_main SLAVE_PCIE_1 0>;
interconnect-names = "pcie-mem", "cpu-pcie";
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1481 0x1>,
- <0x100 &gic_its 0x1480 0x1>;
+ msi-map = <0x0 &gic_its 0x1480 0x1>,
+ <0x100 &gic_its 0x1481 0x1>;
iommu-map = <0x0 &apps_smmu 0x1480 0x1>,
<0x100 &apps_smmu 0x1481 0x1>;
diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
index ba72d8f38420..eb117866e59f 100644
--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
@@ -2274,9 +2274,8 @@
interrupt-map-mask = <0 0 0 0x7>;
#interrupt-cells = <1>;
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1401 0x1>,
- <0x100 &gic_its 0x1400 0x1>;
+ msi-map = <0x0 &gic_its 0x1400 0x1>,
+ <0x100 &gic_its 0x1401 0x1>;
msi-map-mask = <0xff00>;
linux,pci-domain = <0>;
@@ -2402,9 +2401,8 @@
interrupt-map-mask = <0 0 0 0x7>;
#interrupt-cells = <1>;
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1481 0x1>,
- <0x100 &gic_its 0x1480 0x1>;
+ msi-map = <0x0 &gic_its 0x1480 0x1>,
+ <0x100 &gic_its 0x1481 0x1>;
msi-map-mask = <0xff00>;
linux,pci-domain = <1>;
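The msi-map un-swaps in the Qualcomm DTSIs above all follow the generic <rid-base phandle msi-base length> encoding, with msi-map-mask applied to the Requester ID first. A small C model of that translation, offered as an illustration rather than kernel code (struct and helper names are invented):

/* Illustrative model of msi-map: RIDs in [rid_base, rid_base + len)
 * translate to msi_base + (rid - rid_base) after masking. With the
 * fixed entries, RID 0x0 -> DeviceID 0x1400 and RID 0x100 -> 0x1401. */
#include <stdint.h>
#include <stdio.h>

struct msi_map { uint32_t rid_base, msi_base, len; };

static uint32_t its_devid(const struct msi_map *m, size_t n,
			  uint32_t rid, uint32_t mask)
{
	rid &= mask;
	for (size_t i = 0; i < n; i++)
		if (rid - m[i].rid_base < m[i].len)	/* unsigned wrap rejects rid < base */
			return m[i].msi_base + (rid - m[i].rid_base);
	return UINT32_MAX;	/* no translation */
}

int main(void)
{
	const struct msi_map map[] = {
		{ 0x000, 0x1400, 1 },
		{ 0x100, 0x1401, 1 },
	};
	printf("RID 0x000 -> %#x\n", its_devid(map, 2, 0x000, 0xff00));
	printf("RID 0x100 -> %#x\n", its_devid(map, 2, 0x100, 0xff00));
	return 0;
}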
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 8e517f76189e..6b40082bac68 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -284,7 +284,7 @@
domain-idle-states {
CLUSTER_CL4: cluster-sleep-0 {
- compatible = "arm,idle-state";
+ compatible = "domain-idle-state";
idle-state-name = "l2-ret";
arm,psci-suspend-param = <0x01000044>;
entry-latency-us = <350>;
@@ -293,7 +293,7 @@
};
CLUSTER_CL5: cluster-sleep-1 {
- compatible = "arm,idle-state";
+ compatible = "domain-idle-state";
idle-state-name = "ret-pll-off";
arm,psci-suspend-param = <0x01000054>;
entry-latency-us = <2200>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index 5846a11f0e84..d5e035823eb5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -663,7 +663,7 @@ camera: &i2c7 {
port@1 {
reg = <1>;
- mipi1_in_panel: endpoint@1 {
+ mipi1_in_panel: endpoint {
remote-endpoint = <&mipi1_out_panel>;
};
};
@@ -689,7 +689,6 @@ camera: &i2c7 {
ep-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
/* PERST# asserted in S3 */
- pcie-reset-suspend = <1>;
vpcie3v3-supply = <&wlan_3v3>;
vpcie1v8-supply = <&pp1800_pcie>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index dfb2a0bdea5b..9586bb12a5d8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -611,7 +611,7 @@
#size-cells = <0>;
interface@0 { /* interface 0 of configuration 1 */
- compatible = "usbbda,8156.config1.0";
+ compatible = "usbifbda,8156.config1.0";
reg = <0 1>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index 054c6a4d1a45..294eb2de263d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -779,7 +779,6 @@
};
&pcie0 {
- bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
num-lanes = <4>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 2c3984a880af..f6f15946579e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -194,6 +194,8 @@
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreqn_cpm>;
+ vpcie3v3-supply = <&vcc3v3_baseboard>;
+ vpcie12v-supply = <&dc_12v>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index c08e69391c01..ccbe3a7a1d2c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -79,6 +79,26 @@
regulator-max-microvolt = <5000000>;
};
+ vcca_0v9: vcca-0v9-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ vin-supply = <&vcc_1v8>;
+ };
+
+ vcca_1v8: vcca-1v8-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
vdd_log: vdd-log {
compatible = "pwm-regulator";
pwms = <&pwm2 0 25000 1>;
@@ -416,16 +436,28 @@
gpio1830-supply = <&vcc_1v8>;
};
-&pmu_io_domains {
- status = "okay";
- pmu1830-supply = <&vcc_1v8>;
+&pcie0 {
+ /* PCIe PHY supplies */
+ vpcie0v9-supply = <&vcca_0v9>;
+ vpcie1v8-supply = <&vcca_1v8>;
};
-&pwm2 {
- status = "okay";
+&pcie_clkreqn_cpm {
+ rockchip,pins =
+ <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&q7_thermal_pin>;
+
+ gpios {
+ q7_thermal_pin: q7-thermal-pin {
+ rockchip,pins =
+ <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
i2c8 {
i2c8_xfer_a: i2c8-xfer {
rockchip,pins =
@@ -458,11 +490,20 @@
usb3 {
usb3_id: usb3-id {
rockchip,pins =
- <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
+&pmu_io_domains {
+ status = "okay";
+ pmu1830-supply = <&vcc_1v8>;
+};
+
+&pwm2 {
+ status = "okay";
+};
+
&sdhci {
/*
* Signal integrity isn't great at 200MHz but 100MHz has proven stable
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
index 6ecdf5d28339..c1194d1e438d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
@@ -447,7 +447,6 @@
&pcie2x1 {
reset-gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_HIGH>;
- disable-gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_pcie>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
index 7b5f3904ef61..c87fad2c34cb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
@@ -416,6 +416,8 @@
vccio_sd: LDO_REG5 {
regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
@@ -525,9 +527,9 @@
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7531";
- reg = <0>;
+ reg = <0x1f>;
ports {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
index a8a4cc190eb3..a3112d5df200 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
@@ -523,7 +523,6 @@
&pcie2x1 {
reset-gpios = <&gpio3 RK_PC1 GPIO_ACTIVE_HIGH>;
- disable-gpios = <&gpio3 RK_PC2 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_mini_pcie>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
index cce1c8e83587..94ecb9b4f98f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
@@ -216,9 +216,9 @@
pinctrl-0 = <&i2c7m0_xfer>;
status = "okay";
- es8316: audio-codec@11 {
+ es8316: audio-codec@10 {
compatible = "everest,es8316";
- reg = <0x11>;
+ reg = <0x10>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
assigned-clock-rates = <12288000>;
clocks = <&cru I2S0_8CH_MCLKOUT>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
index 1b606ea5b6cf..1a604429fb26 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
@@ -485,6 +485,7 @@
pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
spi-max-frequency = <1000000>;
+ system-power-controller;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
@@ -506,7 +507,7 @@
#gpio-cells = <2>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
index 67414d72e2b6..22bbfbe729c1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
@@ -456,6 +456,7 @@
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
pinctrl-names = "default";
spi-max-frequency = <1000000>;
+ system-power-controller;
vcc1-supply = <&vcc4v0_sys>;
vcc2-supply = <&vcc4v0_sys>;
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 3b0e8248e1a4..a75de2665d84 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -161,12 +161,18 @@ static inline unsigned long get_trans_granule(void)
#define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)
/*
- * Generate 'num' values from -1 to 30 with -1 rejected by the
- * __flush_tlb_range() loop below.
+ * Generate 'num' values from -1 to 31 with -1 rejected by the
+ * __flush_tlb_range() loop below. Its return value is only
+ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
+ * 'pages' is more than that, you must iterate over the overall
+ * range.
*/
-#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
-#define __TLBI_RANGE_NUM(pages, scale) \
- ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
+#define __TLBI_RANGE_NUM(pages, scale) \
+ ({ \
+ int __pages = min((pages), \
+ __TLBI_RANGE_PAGES(31, (scale))); \
+ (__pages >> (5 * (scale) + 1)) - 1; \
+ })
/*
* TLB Invalidation
@@ -379,10 +385,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* 3. If there is 1 page remaining, flush it through non-range operations. Range
* operations can only span an even number of pages. We save this for last to
* ensure 64KB start alignment is maintained for the LPA2 case.
- *
- * Note that certain ranges can be represented by either num = 31 and
- * scale or num = 0 and scale + 1. The loop below favours the latter
- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
*/
#define __flush_tlb_range_op(op, start, pages, stride, \
asid, tlb_level, tlbi_user, lpa2) \
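To see what the rewritten macro buys, here is a standalone sketch that mirrors it outside the kernel; the __TLBI_RANGE_PAGES body is reproduced from the surrounding header as an assumption, and the helper replicates the new clamped __TLBI_RANGE_NUM:

/* Standalone sketch of the clamped range-num computation above. */
#include <stdio.h>

#define RANGE_PAGES(num, scale)	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES	RANGE_PAGES(31, 3)

static int range_num(unsigned long pages, int scale)
{
	unsigned long p = pages < RANGE_PAGES(31, scale) ?
			  pages : RANGE_PAGES(31, scale);
	return (int)(p >> (5 * scale + 1)) - 1;
}

int main(void)
{
	/* The old mask-based macro topped out at num = 30: 65536 pages at
	 * scale 2 wrapped to -1 and fell through to extra iterations. The
	 * clamp makes num = 31 reachable, so it is one TLBI RANGE op. */
	printf("num(65536 pages, scale 2) = %d\n", range_num(65536, 2));
	printf("MAX_TLBI_RANGE_PAGES      = %lu\n", MAX_TLBI_RANGE_PAGES);
	return 0;
}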
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ce08b744aaab..cb68adcabe07 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -289,8 +289,28 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
adr_l x1, __hyp_text_end
adr_l x2, dcache_clean_poc
blr x2
+
+ mov_q x0, INIT_SCTLR_EL2_MMU_OFF
+ pre_disable_mmu_workaround
+ msr sctlr_el2, x0
+ isb
0:
mov_q x0, HCR_HOST_NVHE_FLAGS
+
+ /*
+ * Compliant CPUs advertise their VHE-onlyness with
+ * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
+ * RES1 in that case. Publish the E2H bit early so that
+ * it can be picked up by the init_el2_state macro.
+ *
+ * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+ * don't advertise it (they predate this relaxation).
+ */
+ mrs_s x1, SYS_ID_AA64MMFR4_EL1
+ tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
+
+ orr x0, x0, #HCR_E2H
+1:
msr hcr_el2, x0
isb
@@ -303,30 +323,16 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
mov_q x1, INIT_SCTLR_EL1_MMU_OFF
- /*
- * Compliant CPUs advertise their VHE-onlyness with
- * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
- * RES1 in that case.
- *
- * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but
- * don't advertise it (they predate this relaxation).
- */
- mrs_s x0, SYS_ID_AA64MMFR4_EL1
- ubfx x0, x0, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
- tbnz x0, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
-
mrs x0, hcr_el2
and x0, x0, #HCR_E2H
cbz x0, 2f
-1:
+
/* Set a sane SCTLR_EL1, the VHE way */
- pre_disable_mmu_workaround
msr_s SYS_SCTLR_EL12, x1
mov x2, #BOOT_CPU_FLAG_E2H
b 3f
2:
- pre_disable_mmu_workaround
msr sctlr_el1, x1
mov x2, xzr
3:
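The tbz above tests the top (sign) bit of the ID_AA64MMFR4_EL1.E2H0 field: a negative signed field value advertises a VHE-only CPU, so HCR_EL2.E2H is treated as RES1 and published before init_el2_state runs. A minimal sketch of the same predicate in C (the field position is assumed here purely for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define E2H0_SHIFT 24 /* assumed position, for illustration only */
    #define E2H0_WIDTH 4

    static bool e2h_is_res1(uint64_t mmfr4)
    {
        /* tbz tests bit (SHIFT + WIDTH - 1): the field's sign bit.
         * Set sign bit => E2H0 < 0 => HCR_EL2.E2H behaves as RES1. */
        return mmfr4 & (1ULL << (E2H0_SHIFT + E2H0_WIDTH - 1));
    }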
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 162b030ab9da..0d022599eb61 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -761,7 +761,6 @@ static void sve_init_header_from_task(struct user_sve_header *header,
{
unsigned int vq;
bool active;
- bool fpsimd_only;
enum vec_type task_type;
memset(header, 0, sizeof(*header));
@@ -777,12 +776,10 @@ static void sve_init_header_from_task(struct user_sve_header *header,
case ARM64_VEC_SVE:
if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
header->flags |= SVE_PT_VL_INHERIT;
- fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
break;
case ARM64_VEC_SME:
if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
header->flags |= SVE_PT_VL_INHERIT;
- fpsimd_only = false;
break;
default:
WARN_ON_ONCE(1);
@@ -790,7 +787,7 @@ static void sve_init_header_from_task(struct user_sve_header *header,
}
if (active) {
- if (fpsimd_only) {
+ if (target->thread.fp_type == FP_STATE_FPSIMD) {
header->flags |= SVE_PT_REGS_FPSIMD;
} else {
header->flags |= SVE_PT_REGS_SVE;
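Keying the regset layout off the task's saved fp_type (rather than TIF_SVE) reports what the saved buffer actually contains, and also covers the SME case, where the removed fpsimd_only logic hardcoded false. A small sketch of the decision, with flag values mirroring the uapi SVE_PT_REGS_* definitions:

    /* Sketch: pick the SVE regset layout from the saved-state format. */
    #define SVE_PT_REGS_FPSIMD 0u
    #define SVE_PT_REGS_SVE    1u

    enum fp_type { FP_STATE_FPSIMD, FP_STATE_SVE };

    static unsigned int sve_layout(enum fp_type saved)
    {
        /* What the buffer holds, not what the TIF flag implies. */
        return saved == FP_STATE_FPSIMD ? SVE_PT_REGS_FPSIMD
                                        : SVE_PT_REGS_SVE;
    }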
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3dee5490eea9..c4a0a35e02c7 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2597,14 +2597,11 @@ static __init int kvm_arm_init(void)
if (err)
goto out_hyp;
- if (is_protected_kvm_enabled()) {
- kvm_info("Protected nVHE mode initialized successfully\n");
- } else if (in_hyp_mode) {
- kvm_info("VHE mode initialized successfully\n");
- } else {
- char mode = cpus_have_final_cap(ARM64_KVM_HVHE) ? 'h' : 'n';
- kvm_info("Hyp mode (%cVHE) initialized successfully\n", mode);
- }
+ kvm_info("%s%sVHE mode initialized successfully\n",
+ in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
+ "Protected " : "Hyp "),
+ in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
+ "h" : "n"));
/*
* FIXME: Do something reasonable if kvm_init() fails after pKVM
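The folded kvm_info() emits the same three banners as the removed if/else ladder. A quick user-space check of the format arguments (kernel predicates replaced by plain booleans for the sketch):

    #include <stdbool.h>
    #include <stdio.h>

    static void banner(bool in_hyp_mode, bool protected_kvm, bool hvhe)
    {
        printf("%s%sVHE mode initialized successfully\n",
               in_hyp_mode ? "" : (protected_kvm ? "Protected " : "Hyp "),
               in_hyp_mode ? "" : (hvhe ? "h" : "n"));
    }

    int main(void)
    {
        banner(true,  false, false); /* VHE mode initialized successfully */
        banner(false, true,  false); /* Protected nVHE mode ... */
        banner(false, false, true);  /* Hyp hVHE mode ... */
        return 0;
    }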
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index a60fb13e2192..2fc68da4036d 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -154,7 +154,8 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
/* Switch to requested VMID */
__tlb_switch_to_guest(mmu, &cxt, false);
- __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+ __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
+ TLBI_TTL_UNKNOWN);
dsb(ish);
__tlbi(vmalle1is);
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 3fae5830f8d2..5a59ef88b646 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -528,7 +528,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+ __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
} else {
if (ctx->end - ctx->addr < granule)
return -EINVAL;
@@ -843,12 +843,15 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
* Perform the appropriate TLB invalidation based on the
* evicted pte value (if any).
*/
- if (kvm_pte_table(ctx->old, ctx->level))
- kvm_tlb_flush_vmid_range(mmu, ctx->addr,
- kvm_granule_size(ctx->level));
- else if (kvm_pte_valid(ctx->old))
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ u64 size = kvm_granule_size(ctx->level);
+ u64 addr = ALIGN_DOWN(ctx->addr, size);
+
+ kvm_tlb_flush_vmid_range(mmu, addr, size);
+ } else if (kvm_pte_valid(ctx->old)) {
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
ctx->addr, ctx->level);
+ }
}
if (stage2_pte_is_counted(ctx->old))
@@ -896,9 +899,13 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
if (kvm_pte_valid(ctx->old)) {
kvm_clear_pte(ctx->ptep);
- if (!stage2_unmap_defer_tlb_flush(pgt))
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
- ctx->addr, ctx->level);
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+ TLBI_TTL_UNKNOWN);
+ } else if (!stage2_unmap_defer_tlb_flush(pgt)) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+ ctx->level);
+ }
}
mm_ops->put_page(ctx->ptep);
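When a table PTE is broken, ctx->addr may point anywhere inside the block the table mapped, so the range flush is now anchored at the granule-aligned base. The alignment step, sketched for power-of-two sizes (equivalent to the kernel's ALIGN_DOWN in that case):

    #include <stdint.h>
    #include <stdio.h>

    /* Round addr down to the start of a power-of-two sized block. */
    #define ALIGN_DOWN_P2(addr, size) ((addr) & ~((uint64_t)(size) - 1))

    int main(void)
    {
        /* 2MiB granule: any address inside the block aligns to its base,
         * so the flush covers the entire replaced mapping. */
        printf("%llx\n", (unsigned long long)
               ALIGN_DOWN_P2(0x40123456ULL, 0x200000ULL)); /* 40000000 */
        return 0;
    }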
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index b32e2940df7d..1a60b95381e8 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -171,7 +171,8 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
/* Switch to requested VMID */
__tlb_switch_to_guest(mmu, &cxt);
- __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+ __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
+ TLBI_TTL_UNKNOWN);
dsb(ish);
__tlbi(vmalle1is);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 18680771cdb0..dc04bc767865 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1637,7 +1637,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
- if (esr_fsc_is_permission_fault(esr)) {
+ if (esr_fsc_is_translation_fault(esr)) {
/* Beyond sanitised PARange (which is the IPA limit) */
if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
kvm_inject_size_fault(vcpu);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0f0e10bb0a95..b872b003a55f 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -276,7 +276,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *ptep = NULL;
pgdp = pgd_offset(mm, addr);
- p4dp = p4d_offset(pgdp, addr);
+ p4dp = p4d_alloc(mm, pgdp, addr);
+ if (!p4dp)
+ return NULL;
+
pudp = pud_alloc(mm, p4dp, addr);
if (!pudp)
return NULL;
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0c4e3ecf989d..0e270a1c51e6 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
- if (!can_set_direct_map())
- return true;
-
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c5b461dda438..122021f9bdfc 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -943,7 +943,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit(A64_UXTH(is64, dst, dst), ctx);
break;
case 32:
- emit(A64_REV32(is64, dst, dst), ctx);
+ emit(A64_REV32(0, dst, dst), ctx);
/* upper 32 bits already cleared */
break;
case 64:
@@ -1256,7 +1256,7 @@ emit_cond_jmp:
} else {
emit_a64_mov_i(1, tmp, off, ctx);
if (sign_extend)
- emit(A64_LDRSW(dst, src_adj, off_adj), ctx);
+ emit(A64_LDRSW(dst, src, tmp), ctx);
else
emit(A64_LDR32(dst, src, tmp), ctx);
}
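Two JIT fixes here: the 32-bit byte swap must use the W-register REV regardless of is64 so the result is zero-extended, and the sign-extending load with an out-of-range offset must use the register-offset form (the offset was materialized into tmp; src_adj/off_adj belong to the other branch). The bswap semantics being enforced, sketched in plain C:

    #include <stdint.h>

    /* BPF's 32-bit byte swap operates on the low 32 bits and leaves the
     * upper half of the 64-bit register zeroed, which is what the
     * 32-bit (W-register) REV provides on arm64. */
    static uint64_t bswap32_bpf(uint64_t r)
    {
        uint32_t lo = (uint32_t)r;

        lo = ((lo & 0x000000ffu) << 24) | ((lo & 0x0000ff00u) << 8) |
             ((lo & 0x00ff0000u) >> 8)  | ((lo & 0xff000000u) >> 24);
        return lo; /* upper 32 bits cleared by the widening */
    }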
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 1140051a0c45..1150b77fa281 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -63,6 +63,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
ELF_DETAILS
+ .hexagon.attributes 0 : { *(.hexagon.attributes) }
DISCARDS
}
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index a5f300ec6f28..54ad04dacdee 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -595,7 +595,7 @@ config ARCH_SELECTS_CRASH_DUMP
select RELOCATABLE
config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
- def_bool CRASH_CORE
+ def_bool CRASH_RESERVE
config RELOCATABLE
bool "Relocatable kernel"
diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
index 49a70f8c3cab..b6aeb1f70e2a 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
@@ -100,6 +100,13 @@
#size-cells = <2>;
dma-coherent;
+ isa@18000000 {
+ compatible = "isa";
+ #size-cells = <1>;
+ #address-cells = <2>;
+ ranges = <1 0x0 0x0 0x18000000 0x4000>;
+ };
+
liointc0: interrupt-controller@1fe01400 {
compatible = "loongson,liointc-2.0";
reg = <0x0 0x1fe01400 0x0 0x40>,
diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
index dca91caf895e..74b99bd234cc 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
@@ -61,12 +61,45 @@
&gmac0 {
status = "okay";
+
+ phy-mode = "gmii";
+ phy-handle = <&phy0>;
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@0 {
+ reg = <2>;
+ };
+ };
};
&gmac1 {
status = "okay";
+
+ phy-mode = "gmii";
+ phy-handle = <&phy1>;
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy1: ethernet-phy@1 {
+ reg = <2>;
+ };
+ };
};
&gmac2 {
status = "okay";
+
+ phy-mode = "rgmii";
+ phy-handle = <&phy2>;
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy2: ethernet-phy@2 {
+ reg = <0>;
+ };
+ };
};
diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
index a231949b5f55..9eab2d02cbe8 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
@@ -51,6 +51,13 @@
#address-cells = <2>;
#size-cells = <2>;
+ isa@18400000 {
+ compatible = "isa";
+ #size-cells = <1>;
+ #address-cells = <2>;
+ ranges = <1 0x0 0x0 0x18400000 0x4000>;
+ };
+
pmc: power-management@100d0000 {
compatible = "loongson,ls2k2000-pmc", "loongson,ls2k0500-pmc", "syscon";
reg = <0x0 0x100d0000 0x0 0x58>;
@@ -109,6 +116,8 @@
msi: msi-controller@1fe01140 {
compatible = "loongson,pch-msi-1.0";
reg = <0x0 0x1fe01140 0x0 0x8>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
msi-controller;
loongson,msi-base-vec = <64>;
loongson,msi-num-vecs = <192>;
@@ -140,27 +149,34 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ msi-parent = <&msi>;
bus-range = <0x0 0xff>;
- ranges = <0x01000000 0x0 0x00008000 0x0 0x18400000 0x0 0x00008000>,
+ ranges = <0x01000000 0x0 0x00008000 0x0 0x18408000 0x0 0x00008000>,
<0x02000000 0x0 0x60000000 0x0 0x60000000 0x0 0x20000000>;
gmac0: ethernet@3,0 {
reg = <0x1800 0x0 0x0 0x0 0x0>;
- interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+ <13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&pic>;
status = "disabled";
};
gmac1: ethernet@3,1 {
reg = <0x1900 0x0 0x0 0x0 0x0>;
- interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&pic>;
status = "disabled";
};
gmac2: ethernet@3,2 {
reg = <0x1a00 0x0 0x0 0x0 0x0>;
- interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>,
+ <18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&pic>;
status = "disabled";
};
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index b24437e28c6e..7bd47d65bf7a 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -11,6 +11,7 @@
#define _ASM_ADDRSPACE_H
#include <linux/const.h>
+#include <linux/sizes.h>
#include <asm/loongarch.h>
diff --git a/arch/loongarch/include/asm/crash_core.h b/arch/loongarch/include/asm/crash_reserve.h
index 218bdbfa527b..a1d9b84b1c7d 100644
--- a/arch/loongarch/include/asm/crash_core.h
+++ b/arch/loongarch/include/asm/crash_reserve.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LOONGARCH_CRASH_CORE_H
-#define _LOONGARCH_CRASH_CORE_H
+#ifndef _LOONGARCH_CRASH_RESERVE_H
+#define _LOONGARCH_CRASH_RESERVE_H
#define CRASH_ALIGN SZ_2M
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index 4a8adcca329b..c2f9979b2979 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -14,11 +14,6 @@
#include <asm/pgtable-bits.h>
#include <asm/string.h>
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
extern void __init early_iounmap(void __iomem *addr, unsigned long size);
@@ -73,6 +68,21 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
#define __io_aw() mmiowb()
+#ifdef CONFIG_KFENCE
+#define virt_to_phys(kaddr) \
+({ \
+ (likely((unsigned long)kaddr < vm_map_base)) ? __pa((unsigned long)kaddr) : \
+ page_to_phys(tlb_virt_to_page((unsigned long)kaddr)) + offset_in_page((unsigned long)kaddr);\
+})
+
+#define phys_to_virt(paddr) \
+({ \
+ extern char *__kfence_pool; \
+ (unlikely(__kfence_pool == NULL)) ? __va((unsigned long)paddr) : \
+ page_address(phys_to_page((unsigned long)paddr)) + offset_in_page((unsigned long)paddr);\
+})
+#endif
+
#include <asm-generic/io.h>
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
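With KFENCE, pool pages live in a TLB-mapped region rather than a direct-map (DMW) window, so physical/virtual conversion must dispatch on the address: below vm_map_base the linear __pa() applies, above it a page-table walk is needed (and phys_to_virt() relies on the page_address() values installed by arch_kfence_init_pool()). A condensed sketch of the dispatch; dmw_pa()/tlb_pa() are hypothetical stand-ins for __pa() and the tlb_virt_to_page() walk:

    /* Hypothetical stubs standing in for the real translations. */
    static unsigned long dmw_pa(unsigned long kaddr) { return kaddr; }
    static unsigned long tlb_pa(unsigned long kaddr) { return kaddr; }

    static unsigned long virt_to_phys_sketch(unsigned long kaddr,
                                             unsigned long vm_map_base)
    {
        if (kaddr < vm_map_base)    /* direct-mapped (DMW) address */
            return dmw_pa(kaddr);
        /* TLB-mapped address, e.g. the KFENCE pool: walk the page table */
        return tlb_pa(kaddr);
    }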
diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h
index 6c82aea1c993..a6a5760da3a3 100644
--- a/arch/loongarch/include/asm/kfence.h
+++ b/arch/loongarch/include/asm/kfence.h
@@ -16,6 +16,7 @@
static inline bool arch_kfence_init_pool(void)
{
int err;
+ char *kaddr, *vaddr;
char *kfence_pool = __kfence_pool;
struct vm_struct *area;
@@ -35,6 +36,14 @@ static inline bool arch_kfence_init_pool(void)
return false;
}
+ kaddr = kfence_pool;
+ vaddr = __kfence_pool;
+ while (kaddr < kfence_pool + KFENCE_POOL_SIZE) {
+ set_page_address(virt_to_page(kaddr), vaddr);
+ kaddr += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ }
+
return true;
}
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index 44027060c54a..e85df33f11c7 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -78,7 +78,26 @@ typedef struct { unsigned long pgprot; } pgprot_t;
struct page *dmw_virt_to_page(unsigned long kaddr);
struct page *tlb_virt_to_page(unsigned long kaddr);
-#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
+#define pfn_to_phys(pfn) __pfn_to_phys(pfn)
+#define phys_to_pfn(paddr) __phys_to_pfn(paddr)
+
+#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+#define phys_to_page(paddr) pfn_to_page(phys_to_pfn(paddr))
+
+#ifndef CONFIG_KFENCE
+
+#define page_to_virt(page) __va(page_to_phys(page))
+#define virt_to_page(kaddr) phys_to_page(__pa(kaddr))
+
+#else
+
+#define WANT_PAGE_VIRTUAL
+
+#define page_to_virt(page) \
+({ \
+ extern char *__kfence_pool; \
+ (__kfence_pool == NULL) ? __va(page_to_phys(page)) : page_address(page); \
+})
#define virt_to_page(kaddr) \
({ \
@@ -86,6 +105,11 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\
})
+#endif
+
+#define pfn_to_virt(pfn) page_to_virt(pfn_to_page(pfn))
+#define virt_to_pfn(kaddr) page_to_pfn(virt_to_page(kaddr))
+
extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
index 2a35a0bc2aaa..52b638059e40 100644
--- a/arch/loongarch/include/asm/perf_event.h
+++ b/arch/loongarch/include/asm/perf_event.h
@@ -7,6 +7,14 @@
#ifndef __LOONGARCH_PERF_EVENT_H__
#define __LOONGARCH_PERF_EVENT_H__
+#include <asm/ptrace.h>
+
#define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->csr_era = (__ip); \
+ (regs)->regs[3] = current_stack_pointer; \
+ (regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
+}
+
#endif /* __LOONGARCH_PERF_EVENT_H__ */
diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
index da7a3b5b9374..e071f5e9e858 100644
--- a/arch/loongarch/include/asm/tlb.h
+++ b/arch/loongarch/include/asm/tlb.h
@@ -132,8 +132,6 @@ static __always_inline void invtlb_all(u32 op, u32 info, u64 addr)
);
}
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
static void tlb_flush(struct mmu_gather *tlb);
#define tlb_flush tlb_flush
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
index 0491bf453cd4..cac7cba81b65 100644
--- a/arch/loongarch/kernel/perf_event.c
+++ b/arch/loongarch/kernel/perf_event.c
@@ -884,4 +884,4 @@ static int __init init_hw_perf_events(void)
return 0;
}
-early_initcall(init_hw_perf_events);
+pure_initcall(init_hw_perf_events);
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 1fc2f6813ea0..97b40defde06 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -202,10 +202,10 @@ good_area:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
- if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
- goto bad_area;
if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
goto bad_area;
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
+ goto bad_area;
}
/*
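The reordered checks reject instruction fetches from non-executable VMAs first, then fail a data read only when the VMA has neither VM_READ nor VM_WRITE: since the hardware cannot express write-only mappings, a read fault on a write-only VMA is now permitted. The new read-side predicate, as a sketch:

    #include <stdbool.h>

    /* A read access is acceptable unless the VMA is neither readable
     * nor writable; VM_WRITE implies the page is readable once mapped,
     * because write-only page permissions do not exist in hardware. */
    static bool read_fault_ok(bool vm_read, bool vm_write)
    {
        return vm_read || vm_write;
    }
    /* read_fault_ok(false, true) == true: the write-only-VMA case the
     * old ordering wrongly sent to bad_area. */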
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index a9630a81b38a..89af7c12e8c0 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -4,6 +4,7 @@
*/
#include <linux/export.h>
#include <linux/io.h>
+#include <linux/kfence.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
@@ -111,6 +112,9 @@ int __virt_addr_valid(volatile void *kaddr)
{
unsigned long vaddr = (unsigned long)kaddr;
+ if (is_kfence_address((void *)kaddr))
+ return 1;
+
if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
return 0;
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index 2aae72e63871..bda018150000 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -11,13 +11,13 @@
struct page *dmw_virt_to_page(unsigned long kaddr)
{
- return pfn_to_page(virt_to_pfn(kaddr));
+ return phys_to_page(__pa(kaddr));
}
EXPORT_SYMBOL(dmw_virt_to_page);
struct page *tlb_virt_to_page(unsigned long kaddr)
{
- return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
+ return phys_to_page(pfn_to_phys(pte_pfn(*virt_to_kpte(kaddr))));
}
EXPORT_SYMBOL(tlb_virt_to_page);
diff --git a/arch/m68k/include/asm/pgtable.h b/arch/m68k/include/asm/pgtable.h
index 27525c6a12fd..49fcfd734860 100644
--- a/arch/m68k/include/asm/pgtable.h
+++ b/arch/m68k/include/asm/pgtable.h
@@ -2,6 +2,8 @@
#ifndef __M68K_PGTABLE_H
#define __M68K_PGTABLE_H
+#include <asm/page.h>
+
#ifdef __uClinux__
#include <asm/pgtable_no.h>
#else
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 06ef440d16ce..516dc7022bd7 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -619,15 +619,6 @@ config MACH_EYEQ5
bool
-config FIT_IMAGE_FDT_EPM5
- bool "Include FDT for Mobileye EyeQ5 development platforms"
- depends on MACH_EYEQ5
- default n
- help
- Enable this to include the FDT for the EyeQ5 development platforms
- from Mobileye in the FIT kernel image.
- This requires u-boot on the platform.
-
config MACH_NINTENDO64
bool "Nintendo 64 console"
select CEVT_R4K
@@ -1011,6 +1002,15 @@ config CAVIUM_OCTEON_SOC
endchoice
+config FIT_IMAGE_FDT_EPM5
+ bool "Include FDT for Mobileye EyeQ5 development platforms"
+ depends on MACH_EYEQ5
+ default n
+ help
+ Enable this to include the FDT for the EyeQ5 development platforms
+ from Mobileye in the FIT kernel image.
+ This requires u-boot on the platform.
+
source "arch/mips/alchemy/Kconfig"
source "arch/mips/ath25/Kconfig"
source "arch/mips/ath79/Kconfig"
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index d14d0e37ad02..4a2b40ce39e0 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -159,7 +159,7 @@ extern unsigned long exception_ip(struct pt_regs *regs);
#define exception_ip(regs) exception_ip(regs)
#define profile_pc(regs) instruction_pointer(regs)
-extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
+extern asmlinkage long syscall_trace_enter(struct pt_regs *regs);
extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
extern void die(const char *, struct pt_regs *) __noreturn;
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index d1b11f66f748..cb1045ebab06 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -101,6 +101,7 @@ void output_thread_info_defines(void)
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_REGS, thread_info, regs);
+ OFFSET(TI_SYSCALL, thread_info, syscall);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 59288c13b581..61503a36067e 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -1317,16 +1317,13 @@ long arch_ptrace(struct task_struct *child, long request,
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
-asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
user_exit();
- current_thread_info()->syscall = syscall;
-
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
if (ptrace_report_syscall_entry(regs))
return -1;
- syscall = current_thread_info()->syscall;
}
#ifdef CONFIG_SECCOMP
@@ -1335,7 +1332,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
struct seccomp_data sd;
unsigned long args[6];
- sd.nr = syscall;
+ sd.nr = current_thread_info()->syscall;
sd.arch = syscall_get_arch(current);
syscall_get_arguments(current, regs, args);
for (i = 0; i < 6; i++)
@@ -1345,23 +1342,23 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
ret = __secure_computing(&sd);
if (ret == -1)
return ret;
- syscall = current_thread_info()->syscall;
}
#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
- audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
+ audit_syscall_entry(current_thread_info()->syscall,
+ regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
/*
* Negative syscall numbers are mistaken for rejected syscalls, but
* won't have had the return value set appropriately, so we do so now.
*/
- if (syscall < 0)
+ if (current_thread_info()->syscall < 0)
syscall_set_return_value(current, regs, -ENOSYS, 0);
- return syscall;
+ return current_thread_info()->syscall;
}
/*
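The entry assembly now stores the syscall number into thread_info before any tracing work, so syscall_trace_enter() re-reads it after ptrace (which may rewrite it) instead of carrying it in a register argument that could go stale. A hypothetical sketch of the resulting flow:

    #include <stdbool.h>

    /* ti->syscall is written by the entry code; consumers re-read it. */
    struct thread_info_sketch { long syscall; };

    static bool ptrace_entry_stop(struct thread_info_sketch *ti)
    {
        (void)ti;      /* a tracer may rewrite ti->syscall here */
        return false;  /* false: continue with the (possibly new) number */
    }

    static long trace_enter_sketch(struct thread_info_sketch *ti)
    {
        if (ptrace_entry_stop(ti))
            return -1;          /* tracer cancelled the syscall */
        return ti->syscall;     /* seccomp/audit see the update too */
    }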
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 18dc9b345056..2c604717e630 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -77,6 +77,18 @@ loads_done:
PTR_WD load_a7, bad_stack_a7
.previous
+ /*
+ * syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ */
+ subu t2, v0, __NR_O32_Linux
+ bnez t2, 1f /* __NR_syscall at offset 0 */
+ LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number
+ b 2f
+1:
+ LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number
+2:
+
lw t0, TI_FLAGS($28) # syscall tracing enabled?
li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
@@ -114,16 +126,7 @@ syscall_trace_entry:
SAVE_STATIC
move a0, sp
- /*
- * syscall number is in v0 unless we called syscall(__NR_###)
- * where the real syscall number is in a0
- */
- move a1, v0
- subu t2, v0, __NR_O32_Linux
- bnez t2, 1f /* __NR_syscall at offset 0 */
- lw a1, PT_R4(sp)
-
-1: jal syscall_trace_enter
+ jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 97456b2ca7dc..97788859238c 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
+ LONG_S v0, TI_SYSCALL($28) # Store syscall number
+
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
@@ -72,7 +74,6 @@ syscall_common:
n32_syscall_trace_entry:
SAVE_STATIC
move a0, sp
- move a1, v0
jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
index e6264aa62e45..be11ea5cc67e 100644
--- a/arch/mips/kernel/scall64-n64.S
+++ b/arch/mips/kernel/scall64-n64.S
@@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
+ LONG_S v0, TI_SYSCALL($28) # Store syscall number
+
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
@@ -82,7 +84,6 @@ n64_syscall_exit:
syscall_trace_entry:
SAVE_STATIC
move a0, sp
- move a1, v0
jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index d3c2616cba22..7a5abb73e531 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -79,6 +79,22 @@ loads_done:
PTR_WD load_a7, bad_stack_a7
.previous
+ /*
+ * absolute syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ * note: NR_syscall is the first O32 syscall but the macro is
+ * only defined when compiling with -mabi=32 (CONFIG_32BIT)
+ * therefore __NR_O32_Linux is used (4000)
+ */
+
+ subu t2, v0, __NR_O32_Linux
+ bnez t2, 1f /* __NR_syscall at offset 0 */
+ LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number
+ b 2f
+1:
+ LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number
+2:
+
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
@@ -113,22 +129,7 @@ trace_a_syscall:
sd a7, PT_R11(sp) # For indirect syscalls
move a0, sp
- /*
- * absolute syscall number is in v0 unless we called syscall(__NR_###)
- * where the real syscall number is in a0
- * note: NR_syscall is the first O32 syscall but the macro is
- * only defined when compiling with -mabi=32 (CONFIG_32BIT)
- * therefore __NR_O32_Linux is used (4000)
- */
- .set push
- .set reorder
- subu t1, v0, __NR_O32_Linux
- move a1, v0
- bnez t1, 1f /* __NR_syscall at offset 0 */
- ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
- .set pop
-
-1: jal syscall_trace_enter
+ jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 8d98af5c7201..9a8393e6b4a8 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -21,7 +21,8 @@
void __init early_init_devtree(void *params)
{
- __be32 *dtb = (u32 *)__dtb_start;
+ __be32 __maybe_unused *dtb = (u32 *)__dtb_start;
+
#if defined(CONFIG_NIOS2_DTB_AT_PHYS_ADDR)
if (be32_to_cpup((__be32 *)CONFIG_NIOS2_DTB_PHYS_ADDR) ==
OF_DT_HEADER) {
@@ -30,8 +31,11 @@ void __init early_init_devtree(void *params)
return;
}
#endif
+
+#ifdef CONFIG_NIOS2_DTB_SOURCE_BOOL
if (be32_to_cpu((__be32) *dtb) == OF_DT_HEADER)
params = (void *)__dtb_start;
+#endif
early_init_dt_scan(params);
}
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index ee4febb30386..5ce258f3fffa 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -131,7 +131,7 @@ CONFIG_PPDEV=m
CONFIG_I2C=y
CONFIG_HWMON=m
CONFIG_DRM=m
-CONFIG_DRM_DP_CEC=y
+CONFIG_DRM_DISPLAY_DP_AUX_CEC=y
# CONFIG_DRM_I2C_CH7006 is not set
# CONFIG_DRM_I2C_SIL164 is not set
CONFIG_DRM_RADEON=m
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
new file mode 100644
index 000000000000..47c5a1991d10
--- /dev/null
+++ b/arch/parisc/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <uapi/asm/mman.h>
+
+/* PARISC cannot allow mdwe as it needs writable stacks */
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return false;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+#endif /* __ASM_MMAN_H__ */
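With the hook returning false, the generic prctl path should now refuse PR_SET_MDWE on parisc. A user-space probe, as a sketch (the exact errno comes from the generic code, -EINVAL at the time of this series):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_MDWE
    #define PR_SET_MDWE 65
    #endif
    #ifndef PR_MDWE_REFUSE_EXEC_GAIN
    #define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
    #endif

    int main(void)
    {
        /* Expected to fail on parisc now that the arch opts out. */
        if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0) == -1)
            perror("PR_SET_MDWE");
        return 0;
    }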
diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c
index 74fb86b0d209..7c728755852e 100644
--- a/arch/powerpc/crypto/chacha-p10-glue.c
+++ b/arch/powerpc/crypto/chacha-p10-glue.c
@@ -197,6 +197,9 @@ static struct skcipher_alg algs[] = {
static int __init chacha_p10_init(void)
{
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+ return 0;
+
static_branch_enable(&have_p10);
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
@@ -204,10 +207,13 @@ static int __init chacha_p10_init(void)
static void __exit chacha_p10_exit(void)
{
+ if (!static_branch_likely(&have_p10))
+ return;
+
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
-module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init);
+module_init(chacha_p10_init);
module_exit(chacha_p10_exit);
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
index f0a4cf01e85c..78302f6c2580 100644
--- a/arch/powerpc/include/asm/vdso/gettimeofday.h
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -4,7 +4,6 @@
#ifndef __ASSEMBLY__
-#include <asm/page.h>
#include <asm/vdso/timebase.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
@@ -95,7 +94,7 @@ const struct vdso_data *__arch_get_vdso_data(void);
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
- return (void *)vd + PAGE_SIZE;
+ return (void *)vd + (1U << CONFIG_PAGE_SHIFT);
}
#endif
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 1185efebf032..29a8c8e18585 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1285,15 +1285,14 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
+ struct iommu_group *grp;
/* At first attach the ownership is already set */
- if (!domain) {
- iommu_group_put(grp);
+ if (!domain)
return 0;
- }
+ grp = iommu_group_get(dev);
table_group = iommu_group_get_iommudata(grp);
/*
* The domain being set to PLATFORM from earlier
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index 910ba8837add..2acc7d876e1f 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -82,14 +82,14 @@ config ERRATA_THEAD
Otherwise, please say "N" here to avoid unnecessary overhead.
-config ERRATA_THEAD_PBMT
- bool "Apply T-Head memory type errata"
+config ERRATA_THEAD_MAE
+ bool "Apply T-Head's memory attribute extension (XTheadMae) errata"
depends on ERRATA_THEAD && 64BIT && MMU
select RISCV_ALTERNATIVE_EARLY
default y
help
- This will apply the memory type errata to handle the non-standard
- memory type bits in page-table-entries on T-Head SoCs.
+ This will apply the memory attribute extension errata to handle the
+ non-standard PTE utilization on T-Head SoCs (XTheadMae).
If you don't know what to do here, say "Y".
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 252d63942f34..5b3115a19852 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -151,7 +151,7 @@ endif
endif
vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg
-vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg:../compat_vdso/compat_vdso.so
+vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg
ifneq ($(CONFIG_XIP_KERNEL),y)
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy)
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index b1c410bbc1ae..bf6a0a6318ee 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -19,20 +19,26 @@
#include <asm/patch.h>
#include <asm/vendorid_list.h>
-static bool errata_probe_pbmt(unsigned int stage,
- unsigned long arch_id, unsigned long impid)
+#define CSR_TH_SXSTATUS 0x5c0
+#define SXSTATUS_MAEE _AC(0x200000, UL)
+
+static bool errata_probe_mae(unsigned int stage,
+ unsigned long arch_id, unsigned long impid)
{
- if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PBMT))
+ if (!IS_ENABLED(CONFIG_ERRATA_THEAD_MAE))
return false;
if (arch_id != 0 || impid != 0)
return false;
- if (stage == RISCV_ALTERNATIVES_EARLY_BOOT ||
- stage == RISCV_ALTERNATIVES_MODULE)
- return true;
+ if (stage != RISCV_ALTERNATIVES_EARLY_BOOT &&
+ stage != RISCV_ALTERNATIVES_MODULE)
+ return false;
+
+ if (!(csr_read(CSR_TH_SXSTATUS) & SXSTATUS_MAEE))
+ return false;
- return false;
+ return true;
}
/*
@@ -140,8 +146,8 @@ static u32 thead_errata_probe(unsigned int stage,
{
u32 cpu_req_errata = 0;
- if (errata_probe_pbmt(stage, archid, impid))
- cpu_req_errata |= BIT(ERRATA_THEAD_PBMT);
+ if (errata_probe_mae(stage, archid, impid))
+ cpu_req_errata |= BIT(ERRATA_THEAD_MAE);
errata_probe_cmo(stage, archid, impid);
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 1f2dbfb8a8bf..efd851e1b483 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -23,7 +23,7 @@
#endif
#ifdef CONFIG_ERRATA_THEAD
-#define ERRATA_THEAD_PBMT 0
+#define ERRATA_THEAD_MAE 0
#define ERRATA_THEAD_PMU 1
#define ERRATA_THEAD_NUMBER 2
#endif
@@ -53,20 +53,20 @@ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
* in the default case.
*/
#define ALT_SVPBMT_SHIFT 61
-#define ALT_THEAD_PBMT_SHIFT 59
+#define ALT_THEAD_MAE_SHIFT 59
#define ALT_SVPBMT(_val, prot) \
asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
"li %0, %1\t\nslli %0,%0,%3", 0, \
RISCV_ISA_EXT_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \
"li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \
- ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ ERRATA_THEAD_MAE, CONFIG_ERRATA_THEAD_MAE) \
: "=r"(_val) \
: "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \
- "I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(prot##_THEAD >> ALT_THEAD_MAE_SHIFT), \
"I"(ALT_SVPBMT_SHIFT), \
- "I"(ALT_THEAD_PBMT_SHIFT))
+ "I"(ALT_THEAD_MAE_SHIFT))
-#ifdef CONFIG_ERRATA_THEAD_PBMT
+#ifdef CONFIG_ERRATA_THEAD_MAE
/*
* IO/NOCACHE memory types are handled together with svpbmt,
* so on T-Head chips, check if no other memory type is set,
@@ -83,11 +83,11 @@ asm volatile(ALTERNATIVE( \
"slli t3, t3, %3\n\t" \
"or %0, %0, t3\n\t" \
"2:", THEAD_VENDOR_ID, \
- ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ ERRATA_THEAD_MAE, CONFIG_ERRATA_THEAD_MAE) \
: "+r"(_val) \
- : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \
- "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \
- "I"(ALT_THEAD_PBMT_SHIFT) \
+ : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_MAE_SHIFT), \
+ "I"(_PAGE_PMA_THEAD >> ALT_THEAD_MAE_SHIFT), \
+ "I"(ALT_THEAD_MAE_SHIFT) \
: "t3")
#else
#define ALT_THEAD_PMA(_val)
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 2947423b5082..115ac98b8d72 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -89,7 +89,7 @@ typedef struct page *pgtable_t;
#define PTE_FMT "%08lx"
#endif
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
* We override this value as its generic definition uses __pa too early in
* the boot process (before kernel_map.va_pa_offset is set).
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 97fcde30e247..6afd6bb4882e 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -593,6 +593,12 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}
+#define pgprot_nx pgprot_nx
+static inline pgprot_t pgprot_nx(pgprot_t _prot)
+{
+ return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
+}
+
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
@@ -890,7 +896,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
-#define TASK_SIZE 0xffffffffUL
+#define TASK_SIZE _AC(-1, UL)
#define VMALLOC_START _AC(0, UL)
#define VMALLOC_END TASK_SIZE
diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
index 980094c2e976..ac80216549ff 100644
--- a/arch/riscv/include/asm/syscall_wrapper.h
+++ b/arch/riscv/include/asm/syscall_wrapper.h
@@ -36,7 +36,8 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
ulong) \
__attribute__((alias(__stringify(___se_##prefix##name)))); \
__diag_pop(); \
- static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ __used; \
static long ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__))
#define SC_RISCV_REGS_TO_ARGS(x, ...) \
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index ec0cab9fbddd..72ec1d9bd3f3 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -319,7 +319,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
- long __kr_err; \
+ long __kr_err = 0; \
\
__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
if (unlikely(__kr_err)) \
@@ -328,7 +328,7 @@ do { \
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
- long __kr_err; \
+ long __kr_err = 0; \
\
__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
if (unlikely(__kr_err)) \
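The fixup path behind these macros writes the error variable only when a fault actually occurs, so an uninitialized __kr_err could carry a stale nonzero stack value and make a successful nofault access report failure. The pattern, sketched:

    /* 'err' is written only on the fault path, so it must start at 0. */
    static int get_kernel_nofault_sketch(long *dst, const long *src)
    {
        long err = 0;  /* the fix: previously left uninitialized */

        *dst = *src;   /* stand-in for the fixup-protected access; on a
                        * real fault the fixup sets err and skips this */
        return err ? -14 /* -EFAULT */ : 0;
    }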
diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
index 10aaa83db89e..95050ebe9ad0 100644
--- a/arch/riscv/include/uapi/asm/auxvec.h
+++ b/arch/riscv/include/uapi/asm/auxvec.h
@@ -34,7 +34,7 @@
#define AT_L3_CACHEGEOMETRY 47
/* entries in ARCH_DLINFO */
-#define AT_VECTOR_SIZE_ARCH 9
+#define AT_VECTOR_SIZE_ARCH 10
#define AT_MINSIGSTKSZ 51
#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 9f2a8e3ff204..2902f68dc913 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -54,7 +54,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
-#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31)
+#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31)
#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
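(1 << 31) shifts into the sign bit of a 32-bit int, which is undefined behaviour, and in practice the value sign-extends when widened to the 64-bit key mask; 1ULL << 31 keeps the constant unsigned and 64-bit from the start. A quick demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bad  = (uint64_t)(1 << 31); /* UB; sign-extends in practice */
        uint64_t good = 1ULL << 31;

        printf("%llx\n%llx\n",
               (unsigned long long)bad,   /* ffffffff80000000 */
               (unsigned long long)good); /* 80000000 */
        return 0;
    }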
diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
index 62fa393b2eb2..3df4cb788c1f 100644
--- a/arch/riscv/kernel/compat_vdso/Makefile
+++ b/arch/riscv/kernel/compat_vdso/Makefile
@@ -74,5 +74,5 @@ quiet_cmd_compat_vdsold = VDSOLD $@
rm $@.tmp
# actual build commands
-quiet_cmd_compat_vdsoas = VDSOAS $@
+quiet_cmd_compat_vdsoas = VDSOAS $@
cmd_compat_vdsoas = $(COMPAT_CC) $(a_flags) $(COMPAT_CC_FLAGS) -c -o $@ $<
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 37e87fdcf6a0..30e12b310cab 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -80,6 +80,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
*/
lockdep_assert_held(&text_mutex);
+ preempt_disable();
+
if (across_pages)
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
@@ -92,6 +94,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
if (across_pages)
patch_unmap(FIX_TEXT_POKE1);
+ preempt_enable();
+
return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);
@@ -122,6 +126,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
if (!riscv_patch_in_stop_machine)
lockdep_assert_held(&text_mutex);
+ preempt_disable();
+
if (across_pages)
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
@@ -134,6 +140,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
if (across_pages)
patch_unmap(FIX_TEXT_POKE1);
+ preempt_enable();
+
return ret;
}
NOKPROBE_SYMBOL(__patch_insn_write);
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 92922dbd5b5c..e4bc61c4e58a 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -27,8 +27,6 @@
#include <asm/vector.h>
#include <asm/cpufeature.h>
-register unsigned long gp_in_global __asm__("gp");
-
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
@@ -37,7 +35,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
extern asmlinkage void ret_from_fork(void);
-void arch_cpu_idle(void)
+void noinstr arch_cpu_idle(void)
{
cpu_do_idle();
}
@@ -207,7 +205,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
if (unlikely(args->fn)) {
/* Kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
- childregs->gp = gp_in_global;
/* Supervisor/Machine, irqs on: */
childregs->status = SR_PP | SR_PIE;
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 501e66debf69..5a2edd7f027e 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -119,6 +119,13 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
struct __sc_riscv_v_state __user *state = sc_vec;
void __user *datap;
+ /*
+ * Mark the vstate as clean prior to performing the actual copy,
+ * to avoid getting the vstate incorrectly clobbered by the
+ * discarded vector state.
+ */
+ riscv_v_vstate_set_restore(current, regs);
+
/* Copy everything of __sc_riscv_v_state except datap. */
err = __copy_from_user(&current->thread.vstate, &state->v_state,
offsetof(struct __riscv_v_ext_state, datap));
@@ -133,13 +140,7 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
* Copy the whole vector content from user space datap. Use
* copy_from_user to prevent information leak.
*/
- err = copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
- if (unlikely(err))
- return err;
-
- riscv_v_vstate_set_restore(current, regs);
-
- return err;
+ return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
}
#else
#define save_v_state(task, regs) (0)
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 868d6280cf66..05a16b1f0aee 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -122,7 +122,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
pr_cont("\n");
__show_regs(regs);
- dump_instr(KERN_EMERG, regs);
+ dump_instr(KERN_INFO, regs);
}
force_sig_fault(signo, code, (void __user *)addr);
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 9b517fe1b8a8..272c431ac5b9 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -37,6 +37,7 @@ endif
# Disable -pg to prevent insert call site
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
+CFLAGS_REMOVE_hwprobe.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
# Disable profiling and instrumentation for VDSO code
GCOV_PROFILE := n
diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c
index 39e72aa016a4..b467ba5ed910 100644
--- a/arch/riscv/kvm/aia_aplic.c
+++ b/arch/riscv/kvm/aia_aplic.c
@@ -137,11 +137,21 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
raw_spin_lock_irqsave(&irqd->lock, flags);
sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
- if (!pending &&
- ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
- (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))
+ if (sm == APLIC_SOURCECFG_SM_INACTIVE)
goto skip_write_pending;
+ if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
+ sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
+ if (!pending)
+ goto skip_write_pending;
+ if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
+ sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
+ goto skip_write_pending;
+ if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
+ sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
+ goto skip_write_pending;
+ }
+
if (pending)
irqd->state |= APLIC_IRQ_STATE_PENDING;
else
@@ -187,16 +197,31 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
static bool aplic_read_input(struct aplic *aplic, u32 irq)
{
- bool ret;
- unsigned long flags;
+ u32 sourcecfg, sm, raw_input, irq_inverted;
struct aplic_irq *irqd;
+ unsigned long flags;
+ bool ret = false;
if (!irq || aplic->nr_irqs <= irq)
return false;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
- ret = (irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false;
+
+ sourcecfg = irqd->sourcecfg;
+ if (sourcecfg & APLIC_SOURCECFG_D)
+ goto skip;
+
+ sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
+ if (sm == APLIC_SOURCECFG_SM_INACTIVE)
+ goto skip;
+
+ raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
+ irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
+ sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
+ ret = !!(raw_input ^ irq_inverted);
+
+skip:
raw_spin_unlock_irqrestore(&irqd->lock, flags);
return ret;
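Reading the input now honours the source mode: delegated and inactive sources read as 0, and active-low level or falling-edge sources invert the raw line state before reporting it. A compact rendering of the logic (the enum ordering here is illustrative, not the APLIC encoding):

    #include <stdbool.h>

    enum sm_sketch { SM_INACTIVE, SM_EDGE_RISE, SM_EDGE_FALL,
                     SM_LEVEL_HIGH, SM_LEVEL_LOW };

    static bool aplic_input_sketch(bool raw, bool delegated, enum sm_sketch sm)
    {
        if (delegated || sm == SM_INACTIVE)
            return false;
        /* active-low / falling-edge sources invert the raw line state */
        return raw ^ (sm == SM_LEVEL_LOW || sm == SM_EDGE_FALL);
    }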
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index f4a6124d25c9..994adc26db4b 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -986,7 +986,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
- return copy_isa_ext_reg_indices(vcpu, NULL);;
+ return copy_isa_ext_reg_indices(vcpu, NULL);
}
static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index fe8e159394d8..968761843203 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -231,7 +231,7 @@ static void __init setup_bootmem(void)
* In 64-bit, any use of __va/__pa before this point is wrong as we
* did not know the start of DRAM before.
*/
- if (IS_ENABLED(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 893566e004b7..07d743f87b3f 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -99,7 +99,7 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
+static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
unsigned long start, unsigned long size,
unsigned long stride)
{
@@ -200,7 +200,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- __flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
+ __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
start, end - start, PAGE_SIZE);
}
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index aac190085472..1adf2f39ce59 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1463,6 +1463,22 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
if (ret < 0)
return ret;
+ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+ const struct btf_func_model *fm;
+ int idx;
+
+ fm = bpf_jit_find_kfunc_model(ctx->prog, insn);
+ if (!fm)
+ return -EINVAL;
+
+ for (idx = 0; idx < fm->nr_args; idx++) {
+ u8 reg = bpf_to_rv_reg(BPF_REG_1 + idx, ctx);
+
+ if (fm->arg_size[idx] == sizeof(int))
+ emit_sextw(reg, reg, ctx);
+ }
+ }
+
ret = emit_call(addr, fixed_addr, ctx);
if (ret)
return ret;
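The RISC-V LP64 psABI passes 32-bit int arguments sign-extended to 64 bits, while the BPF JIT keeps 32-bit subregisters zero-extended, so each int-sized kfunc argument is sign-extended before the call. What emit_sextw() achieves, sketched in C:

    #include <stdint.h>
    #include <stdio.h>

    /* Replicate bit 31 into bits 63:32, converting a zero-extended BPF
     * subregister into the representation the callee expects for 'int'. */
    static uint64_t sextw(uint64_t reg)
    {
        return (uint64_t)(int64_t)(int32_t)reg;
    }

    int main(void)
    {
        /* prints ffffffffffffffff: -1 as the kfunc will see it */
        printf("%llx\n", (unsigned long long)sextw(0xffffffffULL));
        return 0;
    }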
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 7138d189cc42..0c4cad7d5a5b 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,31 +15,31 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-static inline int arch_atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
{
return __atomic_read(v);
}
#define arch_atomic_read arch_atomic_read
-static inline void arch_atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
__atomic_set(v, i);
}
#define arch_atomic_set arch_atomic_set
-static inline int arch_atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return __atomic_add_barrier(i, &v->counter) + i;
}
#define arch_atomic_add_return arch_atomic_add_return
-static inline int arch_atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return __atomic_add_barrier(i, &v->counter);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add
-static inline void arch_atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
__atomic_add(i, &v->counter);
}
@@ -50,11 +50,11 @@ static inline void arch_atomic_add(int i, atomic_t *v)
#define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
#define ATOMIC_OPS(op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
+static __always_inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__atomic_##op(i, &v->counter); \
} \
-static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
return __atomic_##op##_barrier(i, &v->counter); \
}
@@ -74,7 +74,7 @@ ATOMIC_OPS(xor)
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return __atomic_cmpxchg(&v->counter, old, new);
}
@@ -82,31 +82,31 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
#define ATOMIC64_INIT(i) { (i) }
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
return __atomic64_read(v);
}
#define arch_atomic64_read arch_atomic64_read
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__atomic64_set(v, i);
}
#define arch_atomic64_set arch_atomic64_set
-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
#define arch_atomic64_add_return arch_atomic64_add_return
-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return __atomic64_add_barrier(i, (long *)&v->counter);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-static inline void arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
__atomic64_add(i, (long *)&v->counter);
}
@@ -114,20 +114,20 @@ static inline void arch_atomic64_add(s64 i, atomic64_t *v)
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return __atomic64_cmpxchg((long *)&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#define ATOMIC64_OPS(op) \
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
-{ \
- __atomic64_##op(i, (long *)&v->counter); \
-} \
-static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
-{ \
- return __atomic64_##op##_barrier(i, (long *)&v->counter); \
+#define ATOMIC64_OPS(op) \
+static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+ __atomic64_##op(i, (long *)&v->counter); \
+} \
+static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
+{ \
+ return __atomic64_##op##_barrier(i, (long *)&v->counter); \
}
ATOMIC64_OPS(and)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 50510e08b893..7fa5f96a553a 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -8,7 +8,7 @@
#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__
-static inline int __atomic_read(const atomic_t *v)
+static __always_inline int __atomic_read(const atomic_t *v)
{
int c;
@@ -18,14 +18,14 @@ static inline int __atomic_read(const atomic_t *v)
return c;
}
-static inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(atomic_t *v, int i)
{
asm volatile(
" st %1,%0\n"
: "=R" (v->counter) : "d" (i));
}
-static inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline s64 __atomic64_read(const atomic64_t *v)
{
s64 c;
@@ -35,7 +35,7 @@ static inline s64 __atomic64_read(const atomic64_t *v)
return c;
}
-static inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
{
asm volatile(
" stg %1,%0\n"
@@ -45,7 +45,7 @@ static inline void __atomic64_set(atomic64_t *v, s64 i)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
-static inline op_type op_name(op_type val, op_type *ptr) \
+static __always_inline op_type op_name(op_type val, op_type *ptr) \
{ \
op_type old; \
\
@@ -96,7 +96,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __ATOMIC_OP(op_name, op_string) \
-static inline int op_name(int val, int *ptr) \
+static __always_inline int op_name(int val, int *ptr) \
{ \
int old, new; \
\
@@ -122,7 +122,7 @@ __ATOMIC_OPS(__atomic_xor, "xr")
#undef __ATOMIC_OPS
#define __ATOMIC64_OP(op_name, op_string) \
-static inline long op_name(long val, long *ptr) \
+static __always_inline long op_name(long val, long *ptr) \
{ \
long old, new; \
\
@@ -154,7 +154,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
asm volatile(
" cs %[old],%[new],%[ptr]"
@@ -164,7 +164,7 @@ static inline int __atomic_cmpxchg(int *ptr, int old, int new)
return old;
}
-static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
int old_expected = old;
@@ -176,7 +176,7 @@ static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
return old == old_expected;
}
-static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
asm volatile(
" csg %[old],%[new],%[ptr]"
@@ -186,7 +186,7 @@ static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
return old;
}
-static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
long old_expected = old;
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index bf15da0fedbc..0e3da500e98c 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -12,12 +12,12 @@
#define PREEMPT_NEED_RESCHED 0x80000000
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
{
return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
{
int old, new;
@@ -29,22 +29,22 @@ static inline void preempt_count_set(int pc)
old, new) != old);
}
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
{
__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
{
__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
{
return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
{
/*
* With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
@@ -59,17 +59,17 @@ static inline void __preempt_count_add(int val)
__atomic_add(val, &S390_lowcore.preempt_count);
}
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
{
__preempt_count_add(-val);
}
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
{
return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
{
return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
preempt_offset);
@@ -79,45 +79,45 @@ static inline bool should_resched(int preempt_offset)
#define PREEMPT_ENABLED (0)
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
{
return READ_ONCE(S390_lowcore.preempt_count);
}
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
{
S390_lowcore.preempt_count = pc;
}
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
{
}
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
{
}
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
{
return false;
}
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
{
S390_lowcore.preempt_count += val;
}
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
{
S390_lowcore.preempt_count -= val;
}
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
{
return !--S390_lowcore.preempt_count && tif_need_resched();
}
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
{
return unlikely(preempt_count() == preempt_offset &&
tif_need_resched());
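
For context on the two variants above: with MARCH_Z196, PREEMPT_NEED_RESCHED is folded into the preempt count as an inverted bit so a single atomic test covers both "count reached zero" and "reschedule requested". A minimal user-space model of that encoding (illustrative names only; plain ops stand in for the atomics):

	#include <stdbool.h>
	#include <stdio.h>

	#define PREEMPT_NEED_RESCHED 0x80000000u

	static unsigned int count = 0 + PREEMPT_NEED_RESCHED; /* PREEMPT_ENABLED */

	static unsigned int model_preempt_count(void)
	{
		return count & ~PREEMPT_NEED_RESCHED;
	}

	/* Stored inverted: clearing the bit means "resched needed". */
	static void model_set_need_resched(void)   { count &= ~PREEMPT_NEED_RESCHED; }
	static void model_clear_need_resched(void) { count |=  PREEMPT_NEED_RESCHED; }

	static bool model_dec_and_test(void)
	{
		/* true only when the count hits 0 *and* the bit is clear */
		return --count == 0;
	}

	int main(void)
	{
		count += 1;			/* preempt_disable() */
		model_set_need_resched();
		printf("count=%u\n", model_preempt_count());	/* 1 */
		printf("resched=%d\n", model_dec_and_test());	/* 1 */
		return 0;
	}
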
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 787394978bc0..6a1e0fbbaa15 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -340,7 +340,8 @@ SYM_CODE_START(pgm_check_handler)
mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
stctg %c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
- lg %r12,__LC_GMAP
+ ltg %r12,__LC_GMAP
+ jz 5f
clc __GMAP_ASCE(8,%r12), __PT_CR1(%r11)
jne 5f
BPENTER __SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST
@@ -635,6 +636,7 @@ SYM_DATA_START_LOCAL(daton_psw)
SYM_DATA_END(daton_psw)
.section .rodata, "a"
+ .balign 8
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 823d652e3917..4ad472d130a3 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -90,7 +90,6 @@ static void paicrypt_event_destroy(struct perf_event *event)
event->cpu);
struct paicrypt_map *cpump = mp->mapptr;
- cpump->event = NULL;
static_branch_dec(&pai_key);
mutex_lock(&pai_reserve_mutex);
debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
@@ -356,10 +355,15 @@ static int paicrypt_add(struct perf_event *event, int flags)
static void paicrypt_stop(struct perf_event *event, int flags)
{
- if (!event->attr.sample_period) /* Counting */
+ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+ struct paicrypt_map *cpump = mp->mapptr;
+
+ if (!event->attr.sample_period) { /* Counting */
paicrypt_read(event);
- else /* Sampling */
+ } else { /* Sampling */
perf_sched_cb_dec(event->pmu);
+ cpump->event = NULL;
+ }
event->hw.state = PERF_HES_STOPPED;
}
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index 616a25606cd6..a6da7e0cc7a6 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -122,7 +122,6 @@ static void paiext_event_destroy(struct perf_event *event)
free_page(PAI_SAVE_AREA(event));
mutex_lock(&paiext_reserve_mutex);
- cpump->event = NULL;
if (refcount_dec_and_test(&cpump->refcnt)) /* Last reference gone */
paiext_free(mp);
paiext_root_free();
@@ -362,10 +361,15 @@ static int paiext_add(struct perf_event *event, int flags)
static void paiext_stop(struct perf_event *event, int flags)
{
- if (!event->attr.sample_period) /* Counting */
+ struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
+ struct paiext_map *cpump = mp->mapptr;
+
+ if (!event->attr.sample_period) { /* Counting */
paiext_read(event);
- else /* Sampling */
+ } else { /* Sampling */
perf_sched_cb_dec(event->pmu);
+ cpump->event = NULL;
+ }
event->hw.state = PERF_HES_STOPPED;
}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index c421dd44ffbe..0c66b32e0f9f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -75,7 +75,7 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
if (!IS_ENABLED(CONFIG_PGSTE))
return KERNEL_FAULT;
gmap = (struct gmap *)S390_lowcore.gmap;
- if (regs->cr1 == gmap->asce)
+ if (gmap && gmap->asce == regs->cr1)
return GMAP_FAULT;
return KERNEL_FAULT;
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index b418333bb086..5af0402e94b8 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -516,11 +516,12 @@ static void bpf_skip(struct bpf_jit *jit, int size)
* PLT for hotpatchable calls. The calling convention is the same as for the
* ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
*/
-extern const char bpf_plt[];
-extern const char bpf_plt_ret[];
-extern const char bpf_plt_target[];
-extern const char bpf_plt_end[];
-#define BPF_PLT_SIZE 32
+struct bpf_plt {
+ char code[16];
+ void *ret;
+ void *target;
+} __packed;
+extern const struct bpf_plt bpf_plt;
asm(
".pushsection .rodata\n"
" .balign 8\n"
@@ -531,15 +532,14 @@ asm(
" .balign 8\n"
"bpf_plt_ret: .quad 0\n"
"bpf_plt_target: .quad 0\n"
- "bpf_plt_end:\n"
" .popsection\n"
);
-static void bpf_jit_plt(void *plt, void *ret, void *target)
+static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
{
- memcpy(plt, bpf_plt, BPF_PLT_SIZE);
- *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
- *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
+ memcpy(plt, &bpf_plt, sizeof(*plt));
+ plt->ret = ret;
+ plt->target = target;
}
/*
@@ -662,9 +662,9 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
jit->prg = ALIGN(jit->prg, 8);
jit->prologue_plt = jit->prg;
if (jit->prg_buf)
- bpf_jit_plt(jit->prg_buf + jit->prg,
+ bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
jit->prg_buf + jit->prologue_plt_ret, NULL);
- jit->prg += BPF_PLT_SIZE;
+ jit->prg += sizeof(struct bpf_plt);
}
static int get_probe_mem_regno(const u8 *insn)
@@ -2040,9 +2040,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
struct bpf_jit jit;
int pass;
- if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
- return orig_fp;
-
if (!fp->jit_requested)
return orig_fp;
@@ -2148,14 +2145,11 @@ bool bpf_jit_supports_far_kfunc_call(void)
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
+ struct bpf_plt expected_plt, current_plt, new_plt, *plt;
struct {
u16 opc;
s32 disp;
} __packed insn;
- char expected_plt[BPF_PLT_SIZE];
- char current_plt[BPF_PLT_SIZE];
- char new_plt[BPF_PLT_SIZE];
- char *plt;
char *ret;
int err;
@@ -2174,18 +2168,18 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
*/
} else {
/* Verify the PLT. */
- plt = (char *)ip + (insn.disp << 1);
- err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
+ plt = ip + (insn.disp << 1);
+ err = copy_from_kernel_nofault(&current_plt, plt,
+ sizeof(current_plt));
if (err < 0)
return err;
ret = (char *)ip + 6;
- bpf_jit_plt(expected_plt, ret, old_addr);
- if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
+ bpf_jit_plt(&expected_plt, ret, old_addr);
+ if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
return -EINVAL;
/* Adjust the call address. */
- bpf_jit_plt(new_plt, ret, new_addr);
- s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
- new_plt + (bpf_plt_target - bpf_plt),
+ bpf_jit_plt(&new_plt, ret, new_addr);
+ s390_kernel_write(&plt->target, &new_plt.target,
sizeof(void *));
}
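
The rewrite above replaces byte offsets computed from the bpf_plt_ret/bpf_plt_target labels with typed fields of struct bpf_plt. A compile-time layout check in the same spirit (a hypothetical addition, not part of the patch) would pin down what the asm template must emit:

	#include <stddef.h>

	struct bpf_plt {
		char code[16];
		void *ret;
		void *target;
	} __attribute__((packed));

	/* 16 bytes of code followed by two 8-byte slots: 32 bytes total,
	 * matching the old BPF_PLT_SIZE and the .quad slots in the asm. */
	_Static_assert(sizeof(struct bpf_plt) == 32, "PLT layout changed");
	_Static_assert(offsetof(struct bpf_plt, ret) == 16, "ret slot moved");
	_Static_assert(offsetof(struct bpf_plt, target) == 24, "target slot moved");
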
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 6a1f36df6a18..cf0ad89f5639 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -28,7 +28,7 @@ obj-y += net/
obj-$(CONFIG_KEXEC_FILE) += purgatory/
-obj-y += virt/svm/
+obj-y += virt/
# for cleaning
subdir- += boot tools
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 39886bab943a..928820e61cb5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -62,6 +62,7 @@ config X86
select ACPI_HOTPLUG_CPU if ACPI_PROCESSOR && HOTPLUG_CPU
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_INIT
+ select ARCH_CONFIGURES_CPU_MITIGATIONS
select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64
@@ -2439,6 +2440,8 @@ config USE_X86_SEG_SUPPORT
# with named address spaces - see GCC PR sanitizer/111736.
#
depends on !KASAN
+ # -fsanitize=thread (KCSAN) is also incompatible.
+ depends on !KCSAN
config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)
@@ -2486,17 +2489,21 @@ config PREFIX_SYMBOLS
def_bool y
depends on CALL_PADDING && !CFI_CLANG
-menuconfig SPECULATION_MITIGATIONS
- bool "Mitigations for speculative execution vulnerabilities"
+menuconfig CPU_MITIGATIONS
+ bool "Mitigations for CPU vulnerabilities"
default y
help
- Say Y here to enable options which enable mitigations for
- speculative execution hardware vulnerabilities.
+ Say Y here to enable options which enable mitigations for hardware
+ vulnerabilities (usually related to speculative execution).
+ Mitigations can be disabled or restricted to SMT systems at runtime
+ via the "mitigations" kernel parameter.
- If you say N, all mitigations will be disabled. You really
- should know what you are doing to say so.
+ If you say N, all mitigations will be disabled. This CANNOT be
+ overridden at runtime.
-if SPECULATION_MITIGATIONS
+ Say 'Y', unless you really know what you are doing.
+
+if CPU_MITIGATIONS
config MITIGATION_PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
@@ -2631,6 +2638,16 @@ config MITIGATION_RFDS
stored in floating point, vector and integer registers.
See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
+config MITIGATION_SPECTRE_BHI
+ bool "Mitigate Spectre-BHB (Branch History Injection)"
+ depends on CPU_SUP_INTEL
+ default y
+ help
+ Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
+ where the branch history buffer is poisoned to speculatively steer
+ indirect branches.
+ See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
+
endif
config ARCH_HAS_ADD_PAGES
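
The compile-time default above interacts with the runtime "mitigations=" parameter mentioned in the help text; individual mitigation select paths typically consult the runtime state through cpu_mitigations_off(). A sketch of the usual shape (generic kernel pattern, not code from this patch):

	static void example_select_mitigation(void)
	{
		/* honor "mitigations=off" and skip unaffected CPUs */
		if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_BHI))
			return;

		/* otherwise pick a mode, e.g. the software clearing loop */
		setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
	}
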
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 662d9d4033e6..5ab93fcdd691 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -251,8 +251,6 @@ archheaders:
libs-y += arch/x86/lib/
-core-y += arch/x86/virt/
-
# drivers-y are linked after core-y
drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
drivers-$(CONFIG_PCI) += arch/x86/pci/
diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
index 719e939050cb..876fc6d46a13 100644
--- a/arch/x86/boot/compressed/efi_mixed.S
+++ b/arch/x86/boot/compressed/efi_mixed.S
@@ -15,10 +15,12 @@
*/
#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
+#include <asm/setup.h>
.code64
.text
@@ -149,6 +151,7 @@ SYM_FUNC_END(__efi64_thunk)
SYM_FUNC_START(efi32_stub_entry)
call 1f
1: popl %ecx
+ leal (efi32_boot_args - 1b)(%ecx), %ebx
/* Clear BSS */
xorl %eax, %eax
@@ -163,6 +166,7 @@ SYM_FUNC_START(efi32_stub_entry)
popl %ecx
popl %edx
popl %esi
+ movl %esi, 8(%ebx)
jmp efi32_entry
SYM_FUNC_END(efi32_stub_entry)
#endif
@@ -239,8 +243,6 @@ SYM_FUNC_END(efi_enter32)
*
* Arguments: %ecx image handle
* %edx EFI system table pointer
- * %esi struct bootparams pointer (or NULL when not using
- * the EFI handover protocol)
*
* Since this is the point of no return for ordinary execution, no registers
* are considered live except for the function parameters. [Note that the EFI
@@ -266,9 +268,18 @@ SYM_FUNC_START_LOCAL(efi32_entry)
leal (efi32_boot_args - 1b)(%ebx), %ebx
movl %ecx, 0(%ebx)
movl %edx, 4(%ebx)
- movl %esi, 8(%ebx)
movb $0x0, 12(%ebx) // efi_is64
+ /*
+ * Allocate some memory for a temporary struct boot_params, which only
+ * needs the minimal pieces that startup_32() relies on.
+ */
+ subl $PARAM_SIZE, %esp
+ movl %esp, %esi
+ movl $PAGE_SIZE, BP_kernel_alignment(%esi)
+ movl $_end - 1b, BP_init_size(%esi)
+ subl $startup_32 - 1b, BP_init_size(%esi)
+
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
@@ -294,8 +305,7 @@ SYM_FUNC_START(efi32_pe_entry)
movl 8(%ebp), %ecx // image_handle
movl 12(%ebp), %edx // sys_table
- xorl %esi, %esi
- jmp efi32_entry // pass %ecx, %edx, %esi
+ jmp efi32_entry // pass %ecx, %edx
// no other registers remain live
2: popl %edi // restore callee-save registers
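
The stack-allocated boot_params built in efi32_entry above fills only the two fields startup_32 consumes. The rough C equivalent (a sketch; the asm works through BP_* offsets from asm-offsets.h, and _end/startup_32 are linker/entry symbols):

	/* Everything else can stay zero for this early, throwaway copy. */
	struct boot_params bp = {};

	bp.hdr.kernel_alignment = PAGE_SIZE;
	bp.hdr.init_size        = (u32)(_end - startup_32); /* decompression window */
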
diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index d07be9d05cd0..b31ef2424d19 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -3,19 +3,28 @@
* Confidential Computing Platform Capability checks
*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
+ * Copyright (C) 2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
#include <linux/export.h>
#include <linux/cc_platform.h>
+#include <linux/string.h>
+#include <linux/random.h>
+#include <asm/archrandom.h>
#include <asm/coco.h>
#include <asm/processor.h>
enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
u64 cc_mask __ro_after_init;
+static struct cc_attr_flags {
+ __u64 host_sev_snp : 1,
+ __resv : 63;
+} cc_flags;
+
static bool noinstr intel_cc_platform_has(enum cc_attr attr)
{
switch (attr) {
@@ -89,6 +98,9 @@ static bool noinstr amd_cc_platform_has(enum cc_attr attr)
case CC_ATTR_GUEST_SEV_SNP:
return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
+ case CC_ATTR_HOST_SEV_SNP:
+ return cc_flags.host_sev_snp;
+
default:
return false;
}
@@ -148,3 +160,84 @@ u64 cc_mkdec(u64 val)
}
}
EXPORT_SYMBOL_GPL(cc_mkdec);
+
+static void amd_cc_platform_clear(enum cc_attr attr)
+{
+ switch (attr) {
+ case CC_ATTR_HOST_SEV_SNP:
+ cc_flags.host_sev_snp = 0;
+ break;
+ default:
+ break;
+ }
+}
+
+void cc_platform_clear(enum cc_attr attr)
+{
+ switch (cc_vendor) {
+ case CC_VENDOR_AMD:
+ amd_cc_platform_clear(attr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void amd_cc_platform_set(enum cc_attr attr)
+{
+ switch (attr) {
+ case CC_ATTR_HOST_SEV_SNP:
+ cc_flags.host_sev_snp = 1;
+ break;
+ default:
+ break;
+ }
+}
+
+void cc_platform_set(enum cc_attr attr)
+{
+ switch (cc_vendor) {
+ case CC_VENDOR_AMD:
+ amd_cc_platform_set(attr);
+ break;
+ default:
+ break;
+ }
+}
+
+__init void cc_random_init(void)
+{
+ /*
+ * The seed is 32 bytes (in units of longs), which is 256 bits, which
+ * is the security level that the RNG is targeting.
+ */
+ unsigned long rng_seed[32 / sizeof(long)];
+ size_t i, longs;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+ return;
+
+ /*
+ * Since the CoCo threat model includes the host, the only reliable
+ * source of entropy that can be neither observed nor manipulated is
+ * RDRAND. Usually, RDRAND failure is considered tolerable, but since
+ * CoCo guests have no other unobservable source of entropy, it's
+ * important to at least ensure the RNG gets some initial random seeds.
+ */
+ for (i = 0; i < ARRAY_SIZE(rng_seed); i += longs) {
+ longs = arch_get_random_longs(&rng_seed[i], ARRAY_SIZE(rng_seed) - i);
+
+ /*
+ * A zero return value means that the guest doesn't have RDRAND
+ * or the CPU is physically broken, and in both cases that
+ * means most crypto inside of the CoCo instance will be
+ * broken, defeating the purpose of CoCo in the first place. So
+ * just panic here because it's absolutely unsafe to continue
+ * executing.
+ */
+ if (longs == 0)
+ panic("RDRAND is defective.");
+ }
+ add_device_randomness(rng_seed, sizeof(rng_seed));
+ memzero_explicit(rng_seed, sizeof(rng_seed));
+}
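
The loop above accepts partial fills from arch_get_random_longs() but treats a hard zero as fatal. A stand-alone user-space sketch of the same "insist on RDRAND" pattern (illustrative only; compile with -mrdrnd, and note the kernel panics where this merely retries):

	#include <immintrin.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long long seed[4];	/* 32 bytes = 256 bits */

		for (unsigned int i = 0; i < 4; ) {
			if (_rdrand64_step(&seed[i]))
				i++;		/* got one long, move on */
			else
				fputs("RDRAND failed, retrying\n", stderr);
		}
		printf("collected %zu seed bytes\n", sizeof(seed));
		return 0;
	}
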
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 6356060caaf3..51cc9c7cb9bd 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -49,7 +49,7 @@ static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
if (likely(unr < NR_syscalls)) {
unr = array_index_nospec(unr, NR_syscalls);
- regs->ax = sys_call_table[unr](regs);
+ regs->ax = x64_sys_call(regs, unr);
return true;
}
return false;
@@ -66,7 +66,7 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
xnr = array_index_nospec(xnr, X32_NR_syscalls);
- regs->ax = x32_sys_call_table[xnr](regs);
+ regs->ax = x32_sys_call(regs, xnr);
return true;
}
return false;
@@ -162,7 +162,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
if (likely(unr < IA32_NR_syscalls)) {
unr = array_index_nospec(unr, IA32_NR_syscalls);
- regs->ax = ia32_sys_call_table[unr](regs);
+ regs->ax = ia32_sys_call(regs, unr);
} else if (nr != -1) {
regs->ax = __ia32_sys_ni_syscall(regs);
}
@@ -189,7 +189,7 @@ static __always_inline bool int80_is_external(void)
}
/**
- * int80_emulation - 32-bit legacy syscall entry
+ * do_int80_emulation - 32-bit legacy syscall C entry from asm
*
* This entry point can be used by 32-bit and 64-bit programs to perform
* 32-bit system calls. Instances of INT $0x80 can be found inline in
@@ -207,7 +207,7 @@ static __always_inline bool int80_is_external(void)
* eax: system call number
* ebx, ecx, edx, esi, edi, ebp: arg1 - arg 6
*/
-DEFINE_IDTENTRY_RAW(int80_emulation)
+__visible noinstr void do_int80_emulation(struct pt_regs *regs)
{
int nr;
@@ -255,6 +255,71 @@ DEFINE_IDTENTRY_RAW(int80_emulation)
instrumentation_end();
syscall_exit_to_user_mode(regs);
}
+
+#ifdef CONFIG_X86_FRED
+/*
+ * A FRED-specific INT80 handler is warranted for the following reasons:
+ *
+ * 1) As INT instructions and hardware interrupts are separate event
+ * types, FRED does not preclude the use of vector 0x80 for external
+ * interrupts. As a result, the FRED setup code does not reserve
+ * vector 0x80 and calling int80_is_external() is not merely
+ * suboptimal but actively incorrect: it could cause a system call
+ * to be incorrectly ignored.
+ *
+ * 2) It is called only for handling vector 0x80 of event type
+ * EVENT_TYPE_SWINT and will never be called to handle any external
+ * interrupt (event type EVENT_TYPE_EXTINT).
+ *
+ * 3) FRED has separate entry flows depending on if the event came from
+ * user space or kernel space, and because the kernel does not use
+ * INT insns, the FRED kernel entry handler fred_entry_from_kernel()
+ * falls through to fred_bad_type() if the event type is
+ * EVENT_TYPE_SWINT, i.e., INT insns. So if the kernel is handling
+ * an INT insn, it can only be from a user level.
+ *
+ * 4) int80_emulation() does a CLEAR_BRANCH_HISTORY. If FRED ever
+ *    needs the same, it will likely take a different approach: it
+ *    probably belongs in either fred_intx()/fred_other() or
+ *    asm_fred_entrypoint_user(), depending on whether this ought to
+ *    be done for all entries from userspace or only for system
+ *    calls.
+ *
+ * 5) INT $0x80 is the fast path for 32-bit system calls under FRED.
+ */
+DEFINE_FREDENTRY_RAW(int80_emulation)
+{
+ int nr;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ add_random_kstack_offset();
+
+ /*
+ * FRED pushed 0 into regs::orig_ax and regs::ax contains the
+ * syscall number.
+ *
+ * User tracing code (ptrace or signal handlers) might assume
+ * that the regs::orig_ax contains a 32-bit number on invoking
+ * a 32-bit syscall.
+ *
+ * Establish the syscall convention by saving the 32bit truncated
+ * syscall number in regs::orig_ax and by invalidating regs::ax.
+ */
+ regs->orig_ax = regs->ax & GENMASK(31, 0);
+ regs->ax = -ENOSYS;
+
+ nr = syscall_32_enter(regs);
+
+ local_irq_enable();
+ nr = syscall_enter_from_user_mode_work(regs, nr);
+ do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+}
+#endif
#else /* CONFIG_IA32_EMULATION */
/* Handles int $0x80 on a 32bit kernel */
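
Point 3 above is the load-bearing invariant: under FRED an INT $0x80 can only come from user level, so the handler need not re-check. The dispatch that routes vector 0x80 here is the fred_intx() change in entry_fred.c further below; schematically (a simplified sketch, not the full handler):

	static void sketch_fred_intx(struct pt_regs *regs)
	{
		if (regs->fred_ss.vector == IA32_SYSCALL_VECTOR && ia32_enabled())
			return fred_int80_emulation(regs); /* from DEFINE_FREDENTRY_RAW */
		/* other software interrupts take the ordinary exception path */
	}
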
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 8af2a26b24f6..1b5be07f8669 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -116,6 +116,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
/* clobbers %rax, make sure it is after saving the syscall nr */
IBRS_ENTER
UNTRAIN_RET
+ CLEAR_BRANCH_HISTORY
call do_syscall_64 /* returns with IRQs disabled */
@@ -1491,3 +1492,63 @@ SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
call make_task_dead
SYM_CODE_END(rewind_stack_and_make_dead)
.popsection
+
+/*
+ * This sequence executes branches in order to remove user branch information
+ * from the branch history tracker in the Branch Predictor, therefore removing
+ * user influence on subsequent BTB lookups.
+ *
+ * It should be used on parts prior to Alder Lake. Newer parts should use the
+ * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
+ * virtualized on newer hardware the VMM should protect against BHI attacks by
+ * setting BHI_DIS_S for the guests.
+ *
+ * CALLs/RETs are necessary to prevent Loop Stream Detector (LSD) from engaging
+ * and not clearing the branch history. The call tree looks like:
+ *
+ * call 1
+ * call 2
+ * call 2
+ * call 2
+ * call 2
+ * call 2
+ * ret
+ * ret
+ * ret
+ * ret
+ * ret
+ * ret
+ *
+ * This means that the stack is non-constant and ORC can't unwind it with %rsp
+ * alone. Therefore we unconditionally set up the frame pointer, which allows
+ * ORC to unwind properly.
+ *
+ * The alignment is for performance and not for safety, and may be safely
+ * refactored in the future if needed.
+ */
+SYM_FUNC_START(clear_bhb_loop)
+ push %rbp
+ mov %rsp, %rbp
+ movl $5, %ecx
+ ANNOTATE_INTRA_FUNCTION_CALL
+ call 1f
+ jmp 5f
+ .align 64, 0xcc
+ ANNOTATE_INTRA_FUNCTION_CALL
+1: call 2f
+ RET
+ .align 64, 0xcc
+2: movl $5, %eax
+3: jmp 4f
+ nop
+4: sub $1, %eax
+ jnz 3b
+ sub $1, %ecx
+ jnz 1b
+ RET
+5: lfence
+ pop %rbp
+ RET
+SYM_FUNC_END(clear_bhb_loop)
+EXPORT_SYMBOL_GPL(clear_bhb_loop)
+STACK_FRAME_NON_STANDARD(clear_bhb_loop)
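
Expanding the call tree in the comment: one initial CALL, five CALLs to the inner block, and 11 executed jumps per outer iteration. A throwaway C model of the iteration counts (purely illustrative; the mitigation is only effective as the exact asm sequence above):

	#include <stdio.h>

	int main(void)
	{
		int calls = 1;				/* the initial "call 1f" */
		int jumps = 0;

		for (int ecx = 5; ecx; ecx--) {		/* outer loop via "jnz 1b" */
			calls++;			/* "call 2f" */
			for (int eax = 5; eax; eax--)
				jumps += 2;		/* "jmp 4f" + "jnz 3b" */
			jumps++;			/* "jnz 1b" (taken or not) */
		}
		printf("%d calls, %d jumps\n", calls, jumps); /* 6 calls, 55 jumps */
		return 0;
	}
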
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index eabf48c4d4b4..c779046cc3fe 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -92,6 +92,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
IBRS_ENTER
UNTRAIN_RET
+ CLEAR_BRANCH_HISTORY
/*
* SYSENTER doesn't filter flags, so we need to clear NT and AC
@@ -206,6 +207,7 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
IBRS_ENTER
UNTRAIN_RET
+ CLEAR_BRANCH_HISTORY
movq %rsp, %rdi
call do_fast_syscall_32
@@ -276,3 +278,17 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
int3
SYM_CODE_END(entry_SYSCALL_compat)
+
+/*
+ * int 0x80 is used by 32-bit mode as a system call entry. Normally IDT
+ * entries point to C routines; however, since this is a system call
+ * interface, the branch history needs to be scrubbed to protect against
+ * BHI attacks, and that scrubbing needs to take place in assembly code
+ * prior to entering any C routines.
+ */
+SYM_CODE_START(int80_emulation)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ CLEAR_BRANCH_HISTORY
+ jmp do_int80_emulation
+SYM_CODE_END(int80_emulation)
diff --git a/arch/x86/entry/entry_fred.c b/arch/x86/entry/entry_fred.c
index ac120cbdaaf2..89c1476fcdd9 100644
--- a/arch/x86/entry/entry_fred.c
+++ b/arch/x86/entry/entry_fred.c
@@ -28,9 +28,9 @@ static noinstr void fred_bad_type(struct pt_regs *regs, unsigned long error_code
if (regs->fred_cs.sl > 0) {
pr_emerg("PANIC: invalid or fatal FRED event; event type %u "
"vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n",
- regs->fred_ss.type, regs->fred_ss.vector, regs->orig_ax,
+ regs->fred_ss.type, regs->fred_ss.vector, error_code,
fred_event_data(regs), regs->cs, regs->ip);
- die("invalid or fatal FRED event", regs, regs->orig_ax);
+ die("invalid or fatal FRED event", regs, error_code);
panic("invalid or fatal FRED event");
} else {
unsigned long flags = oops_begin();
@@ -38,10 +38,10 @@ static noinstr void fred_bad_type(struct pt_regs *regs, unsigned long error_code
pr_alert("BUG: invalid or fatal FRED event; event type %u "
"vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n",
- regs->fred_ss.type, regs->fred_ss.vector, regs->orig_ax,
+ regs->fred_ss.type, regs->fred_ss.vector, error_code,
fred_event_data(regs), regs->cs, regs->ip);
- if (__die("Invalid or fatal FRED event", regs, regs->orig_ax))
+ if (__die("Invalid or fatal FRED event", regs, error_code))
sig = 0;
oops_end(flags, regs, sig);
@@ -66,7 +66,7 @@ static noinstr void fred_intx(struct pt_regs *regs)
/* INT80 */
case IA32_SYSCALL_VECTOR:
if (ia32_enabled())
- return int80_emulation(regs);
+ return fred_int80_emulation(regs);
fallthrough;
#endif
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 8cfc9bc73e7f..c2235bae17ef 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -18,8 +18,25 @@
#include <asm/syscalls_32.h>
#undef __SYSCALL
+/*
+ * The sys_call_table[] is no longer used for system calls, but
+ * kernel/trace/trace_syscalls.c still wants to know the system
+ * call address.
+ */
+#ifdef CONFIG_X86_32
#define __SYSCALL(nr, sym) __ia32_##sym,
-
-__visible const sys_call_ptr_t ia32_sys_call_table[] = {
+const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_32.h>
};
+#undef __SYSCALL
+#endif
+
+#define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs);
+
+long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
+{
+ switch (nr) {
+ #include <asm/syscalls_32.h>
+ default: return __ia32_sys_ni_syscall(regs);
+ }
+};
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index be120eec1fc9..33b3f09e6f15 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -11,8 +11,23 @@
#include <asm/syscalls_64.h>
#undef __SYSCALL
+/*
+ * The sys_call_table[] is no longer used for system calls, but
+ * kernel/trace/trace_syscalls.c still wants to know the system
+ * call address.
+ */
#define __SYSCALL(nr, sym) __x64_##sym,
-
-asmlinkage const sys_call_ptr_t sys_call_table[] = {
+const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_64.h>
};
+#undef __SYSCALL
+
+#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
+
+long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
+{
+ switch (nr) {
+ #include <asm/syscalls_64.h>
+ default: return __x64_sys_ni_syscall(regs);
+ }
+};
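
The point of generating x64_sys_call() as a switch is that the old sys_call_table[unr](regs) dispatch is an indirect call — exactly the kind of branch BHI poisons — while the switch compiles to direct calls. The include-the-header-twice trick works by redefining __SYSCALL between expansions; a self-contained toy of the pattern (hypothetical names):

	#include <stdio.h>

	/* Stand-in for <asm/syscalls_64.h>: one X(nr, name) entry per syscall. */
	#define TOY_CALLS \
		X(0, read)  \
		X(1, write)

	/* First expansion: emit a handler per entry. */
	#define X(nr, sym) static long toy_##sym(void) { return 100 + nr; }
	TOY_CALLS
	#undef X

	/* Second expansion: emit "case nr: return toy_##sym();" arms, the
	 * same shape as the generated x64_sys_call() above. */
	static long toy_sys_call(unsigned int nr)
	{
		switch (nr) {
	#define X(nr, sym) case nr: return toy_##sym();
		TOY_CALLS
	#undef X
		default:
			return -38;	/* toy ENOSYS */
		}
	}

	int main(void)
	{
		printf("%ld %ld %ld\n", toy_sys_call(0), toy_sys_call(1),
		       toy_sys_call(7));	/* 100 101 -38 */
		return 0;
	}
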
diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
index bdd0e03a1265..03de4a932131 100644
--- a/arch/x86/entry/syscall_x32.c
+++ b/arch/x86/entry/syscall_x32.c
@@ -11,8 +11,12 @@
#include <asm/syscalls_x32.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) __x64_##sym,
+#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
-asmlinkage const sys_call_ptr_t x32_sys_call_table[] = {
-#include <asm/syscalls_x32.h>
+long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
+{
+ switch (nr) {
+ #include <asm/syscalls_x32.h>
+ default: return __x64_sys_ni_syscall(regs);
+ }
};
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index fd63051bbbbb..3d64bcc403cf 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_X86_X32_ABI) += vdso-image-x32.o
obj-$(CONFIG_COMPAT_32) += vdso-image-32.o vdso32-setup.o
OBJECT_FILES_NON_STANDARD_vdso-image-32.o := n
+OBJECT_FILES_NON_STANDARD_vdso-image-x32.o := n
OBJECT_FILES_NON_STANDARD_vdso-image-64.o := n
OBJECT_FILES_NON_STANDARD_vdso32-setup.o := n
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index aec16e581f5b..985ef3b47919 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -250,7 +250,7 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
/*
* AMD Performance Monitor Family 17h and later:
*/
-static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+static const u64 amd_zen1_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
@@ -262,10 +262,39 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
};
+static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
+};
+
+static const u64 amd_zen4_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x100000120,
+};
+
static u64 amd_pmu_event_map(int hw_event)
{
- if (boot_cpu_data.x86 >= 0x17)
- return amd_f17h_perfmon_event_map[hw_event];
+ if (cpu_feature_enabled(X86_FEATURE_ZEN4) || boot_cpu_data.x86 >= 0x1a)
+ return amd_zen4_perfmon_event_map[hw_event];
+
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19)
+ return amd_zen2_perfmon_event_map[hw_event];
+
+ if (cpu_feature_enabled(X86_FEATURE_ZEN1))
+ return amd_zen1_perfmon_event_map[hw_event];
return amd_perfmon_event_map[hw_event];
}
@@ -904,8 +933,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
if (!status)
goto done;
- /* Read branch records before unfreezing */
- if (status & GLOBAL_STATUS_LBRS_FROZEN) {
+ /* Read branch records */
+ if (x86_pmu.lbr_nr) {
amd_pmu_lbr_read();
status &= ~GLOBAL_STATUS_LBRS_FROZEN;
}
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index 4a1e600314d5..5149830c7c4f 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -402,10 +402,12 @@ void amd_pmu_lbr_enable_all(void)
wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
}
- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
- rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+ if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ }
- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
}
@@ -418,10 +420,12 @@ void amd_pmu_lbr_disable_all(void)
return;
rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
-
wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+
+ if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ }
}
__init int amd_pmu_lbr_init(void)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 09050641ce5d..5b0dd07b1ef1 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1644,6 +1644,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
while (++i < cpuc->n_events) {
cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
+ cpuc->assign[i-1] = cpuc->assign[i];
}
cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;
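
The added line closes a gap where event_list[] and event_constraint[] were compacted on deletion but assign[] was not, so a surviving event could keep a stale counter assignment. A stand-alone toy of the restored invariant (mirrors the patched loop):

	#include <stdio.h>

	/* All per-event arrays must shift together when entry i is removed,
	 * or index j in one array stops describing the same event as index
	 * j in the others. */
	struct toy_events {
		int n_events;
		int event_list[4];
		int event_constraint[4];
		int assign[4];
	};

	static void toy_del(struct toy_events *c, int i)
	{
		while (++i < c->n_events) {
			c->event_list[i - 1]       = c->event_list[i];
			c->event_constraint[i - 1] = c->event_constraint[i];
			c->assign[i - 1]           = c->assign[i]; /* the fix */
		}
		c->event_constraint[i - 1] = 0;
		c->n_events--;
	}

	int main(void)
	{
		struct toy_events c = { 3, {10, 11, 12}, {20, 21, 22}, {0, 1, 2} };

		toy_del(&c, 0);		/* remove entry 0 */
		printf("event %d uses counter %d\n", c.event_list[0], c.assign[0]);
		/* prints "event 11 uses counter 1"; without the fix, counter 0 */
		return 0;
	}
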
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 2641ba620f12..e010bfed8417 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1237,11 +1237,11 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
struct pmu *pmu = event->pmu;
/*
- * Make sure we get updated with the first PEBS
- * event. It will trigger also during removal, but
- * that does not hurt:
+ * Make sure we get updated with the first PEBS event.
+ * During removal, ->pebs_data_cfg is still valid for
+ * the last PEBS event. Don't clear it.
*/
- if (cpuc->n_pebs == 1)
+ if ((cpuc->n_pebs == 1) && add)
cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
if (needed_cb != pebs_needs_sched_cb(cpuc)) {
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 78cd5084104e..4367aa77cb8d 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1693,6 +1693,7 @@ void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
lbr->from = x86_pmu.lbr_from;
lbr->to = x86_pmu.lbr_to;
lbr->info = x86_pmu.lbr_info;
+ lbr->has_callstack = x86_pmu_has_lbr_callstack();
}
EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 5fc45543e955..0569f579338b 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -105,7 +105,7 @@ static bool cpu_is_self(int cpu)
* IPI implementation on Hyper-V.
*/
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
- bool exclude_self)
+ bool exclude_self)
{
struct hv_send_ipi_ex *ipi_arg;
unsigned long flags;
@@ -132,8 +132,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
- nr_bank = cpumask_to_vpset_skip(&(ipi_arg->vp_set), mask,
- exclude_self ? cpu_is_self : NULL);
+ nr_bank = cpumask_to_vpset_skip(&ipi_arg->vp_set, mask,
+ exclude_self ? cpu_is_self : NULL);
/*
* 'nr_bank <= 0' means some CPUs in cpumask can't be
@@ -147,7 +147,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
}
status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
- ipi_arg, NULL);
+ ipi_arg, NULL);
ipi_mask_ex_done:
local_irq_restore(flags);
@@ -155,7 +155,7 @@ ipi_mask_ex_done:
}
static bool __send_ipi_mask(const struct cpumask *mask, int vector,
- bool exclude_self)
+ bool exclude_self)
{
int cur_cpu, vcpu, this_cpu = smp_processor_id();
struct hv_send_ipi ipi_arg;
@@ -181,7 +181,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
return false;
}
- if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+ if (vector < HV_IPI_LOW_VECTOR || vector > HV_IPI_HIGH_VECTOR)
return false;
/*
@@ -218,7 +218,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
}
status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
- ipi_arg.cpu_mask);
+ ipi_arg.cpu_mask);
return hv_result_success(status);
do_ex_hypercall:
@@ -241,7 +241,7 @@ static bool __send_ipi_one(int cpu, int vector)
return false;
}
- if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+ if (vector < HV_IPI_LOW_VECTOR || vector > HV_IPI_HIGH_VECTOR)
return false;
if (vp >= 64)
diff --git a/arch/x86/hyperv/hv_proc.c b/arch/x86/hyperv/hv_proc.c
index 68a0843d4750..3fa1f2ee7b0d 100644
--- a/arch/x86/hyperv/hv_proc.c
+++ b/arch/x86/hyperv/hv_proc.c
@@ -3,7 +3,6 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
-#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
@@ -116,12 +115,11 @@ free_buf:
int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
{
- struct hv_add_logical_processor_in *input;
- struct hv_add_logical_processor_out *output;
+ struct hv_input_add_logical_processor *input;
+ struct hv_output_add_logical_processor *output;
u64 status;
unsigned long flags;
int ret = HV_STATUS_SUCCESS;
- int pxm = node_to_pxm(node);
/*
* When adding a logical processor, the hypervisor may return
@@ -137,11 +135,7 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
input->lp_index = lp_index;
input->apic_id = apic_id;
- input->flags = 0;
- input->proximity_domain_info.domain_id = pxm;
- input->proximity_domain_info.flags.reserved = 0;
- input->proximity_domain_info.flags.proximity_info_valid = 1;
- input->proximity_domain_info.flags.proximity_preferred = 1;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
input, output);
local_irq_restore(flags);
@@ -166,7 +160,6 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
u64 status;
unsigned long irq_flags;
int ret = HV_STATUS_SUCCESS;
- int pxm = node_to_pxm(node);
/* Root VPs don't seem to need pages deposited */
if (partition_id != hv_current_partition_id) {
@@ -185,14 +178,7 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
input->vp_index = vp_index;
input->flags = flags;
input->subnode_type = HvSubnodeAny;
- if (node != NUMA_NO_NODE) {
- input->proximity_domain_info.domain_id = pxm;
- input->proximity_domain_info.flags.reserved = 0;
- input->proximity_domain_info.flags.proximity_info_valid = 1;
- input->proximity_domain_info.flags.proximity_preferred = 1;
- } else {
- input->proximity_domain_info.as_uint64 = 0;
- }
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
local_irq_restore(irq_flags);
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index fcd20c6dc7f9..67b68d0d17d1 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -117,7 +117,7 @@ extern void callthunks_patch_builtin_calls(void);
extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
struct module *mod);
extern void *callthunks_translate_call_dest(void *dest);
-extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip);
#else
static __always_inline void callthunks_patch_builtin_calls(void) {}
static __always_inline void
@@ -128,7 +128,7 @@ static __always_inline void *callthunks_translate_call_dest(void *dest)
return dest;
}
static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
- void *func)
+ void *func, void *ip)
{
return 0;
}
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 94ce0f7c9d3a..e6ab0cf15ed5 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -13,6 +13,7 @@
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/hardirq.h>
+#include <asm/io.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -98,7 +99,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
static inline u32 native_apic_mem_read(u32 reg)
{
- return *((volatile u32 *)(APIC_BASE + reg));
+ return readl((void __iomem *)(APIC_BASE + reg));
}
static inline void native_apic_mem_eoi(void)
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 076bf8dee702..25466c4d2134 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -14,6 +14,7 @@
#include <asm/asm.h>
#include <asm/fred.h>
#include <asm/gsseg.h>
+#include <asm/nospec-branch.h>
#ifndef CONFIG_X86_CMPXCHG64
extern void cmpxchg8b_emu(void);
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index fe1e7e3cc844..63bdc6b85219 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -79,6 +79,9 @@ do { \
#define __smp_mb__before_atomic() do { } while (0)
#define __smp_mb__after_atomic() do { } while (0)
+/* Writing to CR3 provides a full memory barrier in switch_mm(). */
+#define smp_mb__after_switch_mm() do { } while (0)
+
#include <asm-generic/barrier.h>
#endif /* _ASM_X86_BARRIER_H */
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index fb7388bbc212..aa6c8f8ca958 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -22,8 +22,10 @@ static inline void cc_set_mask(u64 mask)
u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
+void cc_random_init(void);
#else
#define cc_vendor (CC_VENDOR_NONE)
+static const u64 cc_mask = 0;
static inline u64 cc_mkenc(u64 val)
{
@@ -34,6 +36,7 @@ static inline u64 cc_mkdec(u64 val)
{
return val;
}
+static inline void cc_random_init(void) { }
#endif
#endif /* _ASM_X86_COCO_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index a1273698fc43..686e92d2663e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -33,6 +33,8 @@ enum cpuid_leafs
CPUID_7_EDX,
CPUID_8000_001F_EAX,
CPUID_8000_0021_EAX,
+ CPUID_LNX_5,
+ NR_CPUID_WORDS,
};
#define X86_CAP_FMT_NUM "%d:%d"
@@ -91,8 +93,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \
REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 22))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -116,8 +119,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \
DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 22))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f0337f7bcf16..3c7434329661 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 21 /* N 32-bit words worth of info */
+#define NCAPINTS 22 /* N 32-bit words worth of info */
#define NBUGINTS 2 /* N 32-bit bug flags */
/*
@@ -460,6 +460,18 @@
#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
/*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+ * CPUID levels like 0x80000022, etc and Linux defined features.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */
+#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */
+#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
+#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
+#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
+
+/*
* BUG word(s)
*/
#define X86_BUG(x) (NCAPINTS*32 + (x))
@@ -507,4 +519,5 @@
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
+#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/crash_reserve.h b/arch/x86/include/asm/crash_reserve.h
index 152239f95541..7835b2cdff04 100644
--- a/arch/x86/include/asm/crash_reserve.h
+++ b/arch/x86/include/asm/crash_reserve.h
@@ -39,4 +39,6 @@ static inline unsigned long crash_low_size_default(void)
#endif
}
+#define HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
+
#endif /* _X86_CRASH_RESERVE_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index da4054fbf533..c492bdc97b05 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -155,6 +155,7 @@
#define DISABLED_MASK18 (DISABLE_IBT)
#define DISABLED_MASK19 (DISABLE_SEV_SNP)
#define DISABLED_MASK20 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define DISABLED_MASK21 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 16e07a2eee19..6efd1497b026 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -855,6 +855,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
struct kvm_hypervisor_cpuid kvm_cpuid;
+ bool is_amd_compatible;
/*
* FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 05956bd8bacf..e72c2b872957 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -61,10 +61,13 @@
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
+#define SPEC_CTRL_BHI_DIS_S_SHIFT 10 /* Disable Branch History Injection behavior */
+#define SPEC_CTRL_BHI_DIS_S BIT(SPEC_CTRL_BHI_DIS_S_SHIFT)
/* A mask for bits which the kernel toggles when controlling mitigations */
#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
- | SPEC_CTRL_RRSBA_DIS_S)
+ | SPEC_CTRL_RRSBA_DIS_S \
+ | SPEC_CTRL_BHI_DIS_S)
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
@@ -163,6 +166,10 @@
* are restricted to targets in
* kernel.
*/
+#define ARCH_CAP_BHI_NO BIT(20) /*
+ * CPU is not affected by Branch
+ * History Injection.
+ */
#define ARCH_CAP_PBRSB_NO BIT(24) /*
* Not susceptible to Post-Barrier
* Return Stack Buffer Predictions.
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index fc3a8a3c7ffe..ff5f1ecc7d1e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -262,11 +262,20 @@
.Lskip_rsb_\@:
.endm
+/*
+ * The CALL to srso_alias_untrain_ret() must be patched in directly at
+ * the spot where untraining must be done, i.e., srso_alias_untrain_ret()
+ * must be the target of a CALL instruction instead of indirectly
+ * jumping to a wrapper which then calls it. Therefore, this macro is
+ * called outside of __UNTRAIN_RET below, for the time being, before the
+ * kernel can support nested alternatives with arbitrary nesting.
+ */
+.macro CALL_UNTRAIN_RET
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
-#define CALL_UNTRAIN_RET "call entry_untrain_ret"
-#else
-#define CALL_UNTRAIN_RET ""
+ ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
+ "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
#endif
+.endm
/*
* Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
@@ -282,8 +291,8 @@
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY)
VALIDATE_UNRET_END
- ALTERNATIVE_3 "", \
- CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ CALL_UNTRAIN_RET
+ ALTERNATIVE_2 "", \
"call entry_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
@@ -317,6 +326,19 @@
ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
.endm
+#ifdef CONFIG_X86_64
+.macro CLEAR_BRANCH_HISTORY
+ ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
+.endm
+
+.macro CLEAR_BRANCH_HISTORY_VMEXIT
+ ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
+.endm
+#else
+#define CLEAR_BRANCH_HISTORY
+#define CLEAR_BRANCH_HISTORY_VMEXIT
+#endif
+
#else /* __ASSEMBLY__ */
#define ANNOTATE_RETPOLINE_SAFE \
@@ -342,6 +364,8 @@ extern void retbleed_return_thunk(void);
static inline void retbleed_return_thunk(void) {}
#endif
+extern void srso_alias_untrain_ret(void);
+
#ifdef CONFIG_MITIGATION_SRSO
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
@@ -357,6 +381,10 @@ extern void srso_alias_return_thunk(void);
extern void entry_untrain_ret(void);
extern void entry_ibpb(void);
+#ifdef CONFIG_X86_64
+extern void clear_bhb_loop(void);
+#endif
+
extern void (*x86_return_thunk)(void);
extern void __warn_thunk(void);
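
The CALL_UNTRAIN_RET comment above turns on how alternatives work: the replacement bytes are patched in place at boot, so the untrain CALL must physically occupy the untraining spot rather than sit behind a wrapper. For reference, the C-side form of a feature-gated patch site looks like this (a generic illustration of the alternative() macro, not code from this patch):

	static __always_inline void untrain_ret_sketch(void)
	{
		/* oldinstr bytes are rewritten in place when the feature is
		 * set, so the patched CALL executes exactly where emitted */
		alternative("",				/* default: no-op */
			    "call entry_untrain_ret",	/* patched in when... */
			    X86_FEATURE_UNRET);		/* ...UNRET is enabled */
	}
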
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 3736b8a46c04..7f1e17250546 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -555,6 +555,7 @@ struct x86_pmu_lbr {
unsigned int from;
unsigned int to;
unsigned int info;
+ bool has_callstack;
};
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0b748ee16b3d..9abb8cc4cd47 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -148,7 +148,7 @@
#define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | \
_PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \
- _PAGE_DEVMAP | _PAGE_ENC | _PAGE_UFFD_WP)
+ _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP)
#define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
@@ -173,6 +173,7 @@ enum page_cache_mode {
};
#endif
+#define _PAGE_CC (_AT(pteval_t, cc_mask))
#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 7ba1726b71c7..e9187ddd3d1f 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -99,6 +99,7 @@
#define REQUIRED_MASK18 0
#define REQUIRED_MASK19 0
#define REQUIRED_MASK20 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define REQUIRED_MASK21 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 9477b4053bce..7f57382afee4 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -218,17 +218,16 @@ void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
unsigned long npages);
void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
unsigned long npages);
-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __noreturn snp_abort(void);
+void snp_dmi_setup(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
u64 sev_get_status(void);
-void kdump_sev_callback(void);
void sev_show_status(void);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
@@ -244,12 +243,12 @@ static inline void __init
early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void __init
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
-static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
+static inline void snp_dmi_setup(void) { }
static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
{
return -ENOTTY;
@@ -258,7 +257,6 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
static inline u64 sev_get_status(void) { return 0; }
-static inline void kdump_sev_callback(void) { }
static inline void sev_show_status(void) { }
#endif
@@ -270,6 +268,7 @@ int psmash(u64 pfn);
int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
int rmp_make_shared(u64 pfn, enum pg_level level);
void snp_leak_pages(u64 pfn, unsigned int npages);
+void kdump_sev_callback(void);
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
@@ -282,6 +281,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 as
}
static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
+static inline void kdump_sev_callback(void) { }
#endif
#endif
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index f44e2f9ab65d..2fc7bc3863ff 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -16,19 +16,17 @@
#include <asm/thread_info.h> /* for TS_COMPAT */
#include <asm/unistd.h>
+/* This is used purely for kernel/trace/trace_syscalls.c */
typedef long (*sys_call_ptr_t)(const struct pt_regs *);
extern const sys_call_ptr_t sys_call_table[];
-#if defined(CONFIG_X86_32)
-#define ia32_sys_call_table sys_call_table
-#else
/*
* These may not exist, but still put the prototypes in so we
* can use IS_ENABLED().
*/
-extern const sys_call_ptr_t ia32_sys_call_table[];
-extern const sys_call_ptr_t x32_sys_call_table[];
-#endif
+extern long ia32_sys_call(const struct pt_regs *, unsigned int nr);
+extern long x32_sys_call(const struct pt_regs *, unsigned int nr);
+extern long x64_sys_call(const struct pt_regs *, unsigned int nr);
/*
* Only the low 32 bits of orig_ax are meaningful, so we return int.
@@ -127,6 +125,7 @@ static inline int syscall_get_arch(struct task_struct *task)
}
bool do_syscall_64(struct pt_regs *regs, int nr);
+void do_int80_emulation(struct pt_regs *regs);
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index b89b40f250e6..6149eabe200f 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -30,12 +30,13 @@ struct x86_init_mpparse {
* @reserve_resources: reserve the standard resources for the
* platform
* @memory_setup: platform specific memory setup
- *
+ * @dmi_setup: platform specific DMI setup
*/
struct x86_init_resources {
void (*probe_roms)(void);
void (*reserve_resources)(void);
char *(*memory_setup)(void);
+ void (*dmi_setup)(void);
};
/**
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index ad29984d5e39..ef11aa4cab42 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -694,6 +694,7 @@ enum sev_cmd_id {
struct kvm_sev_cmd {
__u32 id;
+ __u32 pad0;
__u64 data;
__u32 error;
__u32 sev_fd;
@@ -704,28 +705,35 @@ struct kvm_sev_launch_start {
__u32 policy;
__u64 dh_uaddr;
__u32 dh_len;
+ __u32 pad0;
__u64 session_uaddr;
__u32 session_len;
+ __u32 pad1;
};
struct kvm_sev_launch_update_data {
__u64 uaddr;
__u32 len;
+ __u32 pad0;
};
struct kvm_sev_launch_secret {
__u64 hdr_uaddr;
__u32 hdr_len;
+ __u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
+ __u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
+ __u32 pad2;
};
struct kvm_sev_launch_measure {
__u64 uaddr;
__u32 len;
+ __u32 pad0;
};
struct kvm_sev_guest_status {
@@ -738,33 +746,43 @@ struct kvm_sev_dbg {
__u64 src_uaddr;
__u64 dst_uaddr;
__u32 len;
+ __u32 pad0;
};
struct kvm_sev_attestation_report {
__u8 mnonce[16];
__u64 uaddr;
__u32 len;
+ __u32 pad0;
};
struct kvm_sev_send_start {
__u32 policy;
+ __u32 pad0;
__u64 pdh_cert_uaddr;
__u32 pdh_cert_len;
+ __u32 pad1;
__u64 plat_certs_uaddr;
__u32 plat_certs_len;
+ __u32 pad2;
__u64 amd_certs_uaddr;
__u32 amd_certs_len;
+ __u32 pad3;
__u64 session_uaddr;
__u32 session_len;
+ __u32 pad4;
};
struct kvm_sev_send_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
+ __u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
+ __u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
+ __u32 pad2;
};
struct kvm_sev_receive_start {
@@ -772,17 +790,22 @@ struct kvm_sev_receive_start {
__u32 policy;
__u64 pdh_uaddr;
__u32 pdh_len;
+ __u32 pad0;
__u64 session_uaddr;
__u32 session_len;
+ __u32 pad1;
};
struct kvm_sev_receive_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
+ __u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
+ __u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
+ __u32 pad2;
};
#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
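The new pad fields only name bytes the 64-bit compiler already inserts: a __u32 followed by a __u64 leaves a 4-byte hole on x86-64, while a 32-bit build (4-byte u64 alignment) would pack the fields differently. Making the padding explicit pins one layout for both ABIs and leaves no uninitialized holes in the UAPI structs. A demonstration, assuming an x86-64 target:

#include <stdint.h>
#include <stddef.h>

/* Same shape as the SEV structs above: 32-bit field, then 64-bit field. */
struct implicit {
	uint32_t id;
	/* on x86-64 the compiler inserts 4 hidden padding bytes here */
	uint64_t data;
};

struct explicit_pad {
	uint32_t id;
	uint32_t pad0;		/* the same bytes, now visible and ABI-fixed */
	uint64_t data;
};

/*
 * Both asserts describe the 64-bit ABI; an i386 build of 'implicit'
 * would instead put 'data' at offset 4.
 */
_Static_assert(sizeof(struct implicit) == sizeof(struct explicit_pad),
	       "pad0 only names padding that already existed");
_Static_assert(offsetof(struct implicit, data) == 8, "data at offset 8");

int main(void) { return 0; }
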
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 6bc3456a8ebf..a1efa7907a0b 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -142,7 +142,6 @@ struct kvm_vcpu_pv_apf_data {
__u32 token;
__u8 pad[56];
- __u32 enabled;
};
#define KVM_PV_EOI_BIT 0
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a42d8a6f7149..c342c4aa9c68 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1687,11 +1687,11 @@ static int x2apic_state;
static bool x2apic_hw_locked(void)
{
- u64 ia32_cap;
+ u64 x86_arch_cap_msr;
u64 msr;
- ia32_cap = x86_read_arch_cap_msr();
- if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
+ x86_arch_cap_msr = x86_read_arch_cap_msr();
+ if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
return (msr & LEGACY_XAPIC_DISABLED);
}
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index 30335182b6b0..e92ff0c11db8 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -314,7 +314,7 @@ static bool is_callthunk(void *addr)
return !bcmp(pad, insn_buff, tmpl_size);
}
-int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
{
unsigned int tmpl_size = SKL_TMPL_SIZE;
u8 insn_buff[MAX_PATCH_LEN];
@@ -327,7 +327,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
return 0;
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
- apply_relocation(insn_buff, tmpl_size, *pprog,
+ apply_relocation(insn_buff, tmpl_size, ip,
skl_call_thunk_template, tmpl_size);
memcpy(*pprog, insn_buff, tmpl_size);
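Passing the real destination ip (rather than the staging buffer *pprog) matters because the thunk template contains RIP-relative operands; they must be resolved against the address the bytes will finally execute from. A toy model of the displacement math, with made-up addresses:

#include <stdint.h>
#include <stdio.h>

/*
 * A rel32 operand is target minus the address of the *next* instruction
 * at execution time, not in the buffer where the bytes are staged.
 */
static int32_t rel32(uint64_t ip_after_insn, uint64_t target)
{
	return (int32_t)(target - ip_after_insn);
}

int main(void)
{
	uint64_t stage_buf = 0x1000;	/* where bytes are assembled */
	uint64_t final_ip  = 0x9000;	/* where they will be copied and run */
	uint64_t target    = 0x9400;

	printf("wrong (staged) disp: %d\n", rel32(stage_buf + 5, target));
	printf("right (final) disp:  %d\n", rel32(final_ip + 5, target));
	return 0;
}
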
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 6d8677e80ddb..307302af0aee 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -345,6 +345,28 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
#endif
}
+static void bsp_determine_snp(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+ cc_vendor = CC_VENDOR_AMD;
+
+ if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
+ /*
+ * RMP table entry format is not architectural and is defined by the
+ * per-processor PPR. Restrict SNP support to the known CPU models
+ * for which the RMP table entry format is currently defined.
+ */
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
+ c->x86 >= 0x19 && snp_probe_rmptable_info()) {
+ cc_platform_set(CC_ATTR_HOST_SEV_SNP);
+ } else {
+ setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+ }
+ }
+#endif
+}
+
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -437,8 +459,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
case 0x1a:
switch (c->x86_model) {
- case 0x00 ... 0x0f:
- case 0x20 ... 0x2f:
+ case 0x00 ... 0x2f:
case 0x40 ... 0x4f:
case 0x70 ... 0x7f:
setup_force_cpu_cap(X86_FEATURE_ZEN5);
@@ -452,21 +473,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
break;
}
- if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
- /*
- * RMP table entry format is not architectural and it can vary by processor
- * and is defined by the per-processor PPR. Restrict SNP support on the
- * known CPU model and family for which the RMP table entry format is
- * currently defined for.
- */
- if (!boot_cpu_has(X86_FEATURE_ZEN3) &&
- !boot_cpu_has(X86_FEATURE_ZEN4) &&
- !boot_cpu_has(X86_FEATURE_ZEN5))
- setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
- else if (!snp_probe_rmptable_info())
- setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
- }
-
+ bsp_determine_snp(c);
return;
warn:
@@ -527,7 +534,6 @@ clear_sev:
static void early_init_amd(struct cpuinfo_x86 *c)
{
- u64 value;
u32 dummy;
if (c->x86 >= 0xf)
@@ -595,20 +601,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
early_detect_mem_encrypt(c);
- /* Re-enable TopologyExtensions if switched off by BIOS */
- if (c->x86 == 0x15 &&
- (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
-
- if (msr_set_bit(0xc0011005, 54) > 0) {
- rdmsrl(0xc0011005, value);
- if (value & BIT_64(54)) {
- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
- pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
- }
- }
- }
-
if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index e7ba936d798b..ab18185894df 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -61,6 +61,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);
+static u64 __ro_after_init x86_arch_cap_msr;
+
static DEFINE_MUTEX(spec_ctrl_mutex);
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
@@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
}
+ x86_arch_cap_msr = x86_read_arch_cap_msr();
+
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
@@ -301,8 +305,6 @@ static const char * const taa_strings[] = {
static void __init taa_select_mitigation(void)
{
- u64 ia32_cap;
-
if (!boot_cpu_has_bug(X86_BUG_TAA)) {
taa_mitigation = TAA_MITIGATION_OFF;
return;
@@ -341,9 +343,8 @@ static void __init taa_select_mitigation(void)
* On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
* update is required.
*/
- ia32_cap = x86_read_arch_cap_msr();
- if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
- !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+ if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+ !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
/*
@@ -401,8 +402,6 @@ static const char * const mmio_strings[] = {
static void __init mmio_select_mitigation(void)
{
- u64 ia32_cap;
-
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
cpu_mitigations_off()) {
@@ -413,8 +412,6 @@ static void __init mmio_select_mitigation(void)
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return;
- ia32_cap = x86_read_arch_cap_msr();
-
/*
* Enable CPU buffer clear mitigation for host and VMM, if also affected
* by MDS or TAA. Otherwise, enable mitigation for VMM only.
@@ -437,7 +434,7 @@ static void __init mmio_select_mitigation(void)
* be propagated to uncore buffers, clearing the Fill buffers on idle
* is required irrespective of SMT state.
*/
- if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);
/*
@@ -447,10 +444,10 @@ static void __init mmio_select_mitigation(void)
* FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
* affected systems.
*/
- if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+ if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
(boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
- !(ia32_cap & ARCH_CAP_MDS_NO)))
+ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
mmio_mitigation = MMIO_MITIGATION_VERW;
else
mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
@@ -508,7 +505,7 @@ static void __init rfds_select_mitigation(void)
if (rfds_mitigation == RFDS_MITIGATION_OFF)
return;
- if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
+ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else
rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@@ -659,8 +656,6 @@ void update_srbds_msr(void)
static void __init srbds_select_mitigation(void)
{
- u64 ia32_cap;
-
if (!boot_cpu_has_bug(X86_BUG_SRBDS))
return;
@@ -669,8 +664,7 @@ static void __init srbds_select_mitigation(void)
* are only exposed to SRBDS when TSX is enabled or when CPU is affected
* by Processor MMIO Stale Data vulnerability.
*/
- ia32_cap = x86_read_arch_cap_msr();
- if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -813,7 +807,7 @@ static void __init gds_select_mitigation(void)
/* Will verify below that mitigation _can_ be disabled */
/* No microcode */
- if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+ if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
if (gds_mitigation == GDS_MITIGATION_FORCE) {
/*
* This only needs to be done on the boot CPU so do it
@@ -1544,20 +1538,25 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
return SPECTRE_V2_RETPOLINE;
}
+static bool __ro_after_init rrsba_disabled;
+
/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
- u64 ia32_cap;
+ if (rrsba_disabled)
+ return;
- if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+ if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
+ rrsba_disabled = true;
return;
+ }
- ia32_cap = x86_read_arch_cap_msr();
+ if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+ return;
- if (ia32_cap & ARCH_CAP_RRSBA) {
- x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
- update_spec_ctrl(x86_spec_ctrl_base);
- }
+ x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+ update_spec_ctrl(x86_spec_ctrl_base);
+ rrsba_disabled = true;
}
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
@@ -1607,6 +1606,74 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
dump_stack();
}
+/*
+ * Set BHI_DIS_S to prevent indirect branches in the kernel from being
+ * influenced by branch history in userspace. Not needed if BHI_NO is set.
+ */
+static bool __init spec_ctrl_bhi_dis(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
+ return false;
+
+ x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
+ update_spec_ctrl(x86_spec_ctrl_base);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
+
+ return true;
+}
+
+enum bhi_mitigations {
+ BHI_MITIGATION_OFF,
+ BHI_MITIGATION_ON,
+};
+
+static enum bhi_mitigations bhi_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
+
+static int __init spectre_bhi_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off"))
+ bhi_mitigation = BHI_MITIGATION_OFF;
+ else if (!strcmp(str, "on"))
+ bhi_mitigation = BHI_MITIGATION_ON;
+ else
+ pr_err("Ignoring unknown spectre_bhi option (%s)", str);
+
+ return 0;
+}
+early_param("spectre_bhi", spectre_bhi_parse_cmdline);
+
+static void __init bhi_select_mitigation(void)
+{
+ if (bhi_mitigation == BHI_MITIGATION_OFF)
+ return;
+
+ /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
+ if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+ !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
+ spec_ctrl_disable_kernel_rrsba();
+ if (rrsba_disabled)
+ return;
+ }
+
+ if (spec_ctrl_bhi_dis())
+ return;
+
+ if (!IS_ENABLED(CONFIG_X86_64))
+ return;
+
+ /* Mitigate KVM by default */
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
+ pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
+
+ /* Mitigate syscalls when the mitigation is forced =on */
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
+ pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
+}
+
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1718,6 +1785,9 @@ static void __init spectre_v2_select_mitigation(void)
mode == SPECTRE_V2_RETPOLINE)
spec_ctrl_disable_kernel_rrsba();
+ if (boot_cpu_has(X86_BUG_BHI))
+ bhi_select_mitigation();
+
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
@@ -1832,8 +1902,6 @@ static void update_indir_branch_cond(void)
/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
- u64 ia32_cap = x86_read_arch_cap_msr();
-
/*
* Enable the idle clearing if SMT is active on CPUs which are
* affected only by MSBDS and not any other MDS variant.
@@ -1848,7 +1916,7 @@ static void update_mds_branch_idle(void)
if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
- (ia32_cap & ARCH_CAP_FBSDP_NO)) {
+ (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear);
}
}
@@ -2695,15 +2763,15 @@ static char *stibp_state(void)
switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
- return ", STIBP: disabled";
+ return "; STIBP: disabled";
case SPECTRE_V2_USER_STRICT:
- return ", STIBP: forced";
+ return "; STIBP: forced";
case SPECTRE_V2_USER_STRICT_PREFERRED:
- return ", STIBP: always-on";
+ return "; STIBP: always-on";
case SPECTRE_V2_USER_PRCTL:
case SPECTRE_V2_USER_SECCOMP:
if (static_key_enabled(&switch_to_cond_stibp))
- return ", STIBP: conditional";
+ return "; STIBP: conditional";
}
return "";
}
@@ -2712,10 +2780,10 @@ static char *ibpb_state(void)
{
if (boot_cpu_has(X86_FEATURE_IBPB)) {
if (static_key_enabled(&switch_mm_always_ibpb))
- return ", IBPB: always-on";
+ return "; IBPB: always-on";
if (static_key_enabled(&switch_mm_cond_ibpb))
- return ", IBPB: conditional";
- return ", IBPB: disabled";
+ return "; IBPB: conditional";
+ return "; IBPB: disabled";
}
return "";
}
@@ -2725,14 +2793,32 @@ static char *pbrsb_eibrs_state(void)
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
- return ", PBRSB-eIBRS: SW sequence";
+ return "; PBRSB-eIBRS: SW sequence";
else
- return ", PBRSB-eIBRS: Vulnerable";
+ return "; PBRSB-eIBRS: Vulnerable";
} else {
- return ", PBRSB-eIBRS: Not affected";
+ return "; PBRSB-eIBRS: Not affected";
}
}
+static const char *spectre_bhi_state(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_BHI))
+ return "; BHI: Not affected";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
+ return "; BHI: BHI_DIS_S";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ return "; BHI: SW loop, KVM: SW loop";
+ else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+ !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
+ rrsba_disabled)
+ return "; BHI: Retpoline";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+ return "; BHI: Vulnerable, KVM: SW loop";
+
+ return "; BHI: Vulnerable";
+}
+
static ssize_t spectre_v2_show_state(char *buf)
{
if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
@@ -2745,13 +2831,15 @@ static ssize_t spectre_v2_show_state(char *buf)
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
- return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
+ return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
spectre_v2_strings[spectre_v2_enabled],
ibpb_state(),
- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
stibp_state(),
- boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
pbrsb_eibrs_state(),
+ spectre_bhi_state(),
+ /* this should always be at the end */
spectre_v2_module_string());
}
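With the separator switched from ", " to "; ", each mitigation facet in the sysfs string reads as its own field, and the new spectre_bhi_state() segment slots in just before the module string. One way to inspect the resulting string on a kernel carrying this patch:

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "...; BHI: SW loop, KVM: SW loop" */
	fclose(f);
	return 0;
}
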
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5c1e6d6be267..605c26c009c8 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1120,6 +1120,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_SPECTRE_V2 BIT(8)
#define NO_MMIO BIT(9)
#define NO_EIBRS_PBRSB BIT(10)
+#define NO_BHI BIT(11)
#define VULNWL(vendor, family, model, whitelist) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
@@ -1182,18 +1183,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
/* AMD Family 0xf - 0x12 */
- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
/* Zhaoxin Family 7 */
- VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
- VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
+ VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
{}
};
@@ -1283,25 +1284,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
u64 x86_read_arch_cap_msr(void)
{
- u64 ia32_cap = 0;
+ u64 x86_arch_cap_msr = 0;
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
- return ia32_cap;
+ return x86_arch_cap_msr;
}
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
{
- return (ia32_cap & ARCH_CAP_FBSDP_NO &&
- ia32_cap & ARCH_CAP_PSDP_NO &&
- ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+ return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+ x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+ x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
}
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
{
/* The "immunity" bit trumps everything else: */
- if (ia32_cap & ARCH_CAP_RFDS_NO)
+ if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
return false;
/*
@@ -1309,7 +1310,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
* indicate that mitigation is needed because guest is running on a
* vulnerable hardware or may migrate to such hardware:
*/
- if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
return true;
/* Only consult the blacklist when there is no enumeration: */
@@ -1318,11 +1319,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
- u64 ia32_cap = x86_read_arch_cap_msr();
+ u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
- !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+ !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1334,7 +1335,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
- !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@@ -1345,17 +1346,17 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Don't use AutoIBRS when SNP is enabled because it degrades host
* userspace indirect branch performance.
*/
- if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
+ if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
(cpu_has(c, X86_FEATURE_AUTOIBRS) &&
!cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
- !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
}
if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
- !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
setup_force_cpu_bug(X86_BUG_MDS);
if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1374,9 +1375,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* TSX_CTRL check alone is not sufficient for cases when the microcode
* update is not present or running as guest that don't get TSX_CTRL.
*/
- if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+ if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
(cpu_has(c, X86_FEATURE_RTM) ||
- (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+ (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
setup_force_cpu_bug(X86_BUG_TAA);
/*
@@ -1402,7 +1403,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
* nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
*/
- if (!arch_cap_mmio_immune(ia32_cap)) {
+ if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
if (cpu_matches(cpu_vuln_blacklist, MMIO))
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
@@ -1410,7 +1411,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
}
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
- if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
@@ -1428,18 +1429,25 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* disabling AVX2. The only way to do this in HW is to clear XCR0[2],
* which means that AVX will be disabled.
*/
- if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
boot_cpu_has(X86_FEATURE_AVX))
setup_force_cpu_bug(X86_BUG_GDS);
- if (vulnerable_to_rfds(ia32_cap))
+ if (vulnerable_to_rfds(x86_arch_cap_msr))
setup_force_cpu_bug(X86_BUG_RFDS);
+ /* When virtualized, eIBRS could be hidden, assume vulnerable */
+ if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
+ !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
+ boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+ setup_force_cpu_bug(X86_BUG_BHI);
+
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
/* Rogue Data Cache Load? No! */
- if (ia32_cap & ARCH_CAP_RDCL_NO)
+ if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
return;
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index b7174209d855..946813d816bf 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_F16C, X86_FEATURE_XMM2, },
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
+ { X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
+ { X86_FEATURE_VAES, X86_FEATURE_AVX },
+ { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
{ X86_FEATURE_AVX2, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
- { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
- { X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
- { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
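Relaxing GFNI/VAES/VPCLMULQDQ to depend on SSE2/AVX instead of AVX512VL means disabling AVX-512 no longer drags those features down with it. The dependency table is consumed transitively; a rough sketch of that closure, using hypothetical feature IDs in place of the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum { F_XMM2, F_AVX, F_GFNI, F_VAES, F_NFEAT };

static const struct { int feat, depends; } deps[] = {
	{ F_GFNI, F_XMM2 },	/* after the patch: GFNI needs only SSE2 */
	{ F_VAES, F_AVX  },	/* VAES needs AVX, not AVX512VL */
};

/* Clearing a feature must transitively clear everything depending on it. */
static void clear_feature(bool *have, int feat)
{
	have[feat] = false;
	for (unsigned int i = 0; i < sizeof(deps) / sizeof(deps[0]); i++)
		if (deps[i].depends == feat && have[deps[i].feat])
			clear_feature(have, deps[i].feat);
}

int main(void)
{
	bool have[F_NFEAT] = { true, true, true, true };

	clear_feature(have, F_AVX);	/* drops VAES, keeps GFNI */
	printf("GFNI=%d VAES=%d\n", have[F_GFNI], have[F_VAES]);
	return 0;
}
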
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index b5cc557cfc37..84d41be6d06b 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -2500,12 +2500,14 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
return -EINVAL;
b = &per_cpu(mce_banks_array, s->id)[bank];
-
if (!b->init)
return -ENODEV;
b->ctl = new;
+
+ mutex_lock(&mce_sysfs_mutex);
mce_restart();
+ mutex_unlock(&mce_sysfs_mutex);
return size;
}
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 422a4ddc2ab7..7b29ebda024f 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -108,7 +108,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
(boot_cpu_data.x86 >= 0x0f)))
return;
- if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return;
rdmsr(MSR_AMD64_SYSCFG, lo, hi);
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index c99f26ebe7a6..1a8687f8073a 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -78,7 +78,8 @@ cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu)
else
cpu = cpumask_any_but(mask, exclude_cpu);
- if (!IS_ENABLED(CONFIG_NO_HZ_FULL))
+ /* Only continue if tick_nohz_full_mask has been initialized. */
+ if (!tick_nohz_full_enabled())
return cpu;
/* If the CPU picked isn't marked nohz_full nothing more needs doing. */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 0dad49a09b7a..af5aa2c754c2 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -28,6 +28,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 },
{ X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
+ { X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 },
{ X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
{ X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
{ X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },
@@ -49,6 +50,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
+ { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },
{ 0, 0, 0, 0, 0 }
};
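The new scattered entry maps X86_FEATURE_BHI_CTRL onto CPUID.(EAX=7,ECX=2):EDX bit 4. The same bit can be checked from userspace with the compiler's cpuid helpers (gcc/clang, x86 only):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* leaf 7, subleaf 2 */
	if (!__get_cpuid_count(7, 2, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("BHI_CTRL: %s\n", (edx & (1u << 4)) ? "yes" : "no");
	return 0;
}
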
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index aaca8d235dc2..d17c9b71eb4a 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -123,7 +123,6 @@ static void topo_set_cpuids(unsigned int cpu, u32 apic_id, u32 acpi_id)
early_per_cpu(x86_cpu_to_apicid, cpu) = apic_id;
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
#endif
- set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
}
@@ -210,7 +209,11 @@ static __init void topo_register_apic(u32 apic_id, u32 acpi_id, bool present)
topo_info.nr_disabled_cpus++;
}
- /* Register present and possible CPUs in the domain maps */
+ /*
+ * Register present and possible CPUs in the domain
+ * maps. cpu_possible_map will be updated in
+ * topology_init_possible_cpus() after enumeration is done.
+ */
for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++)
set_bit(topo_apicid(apic_id, dom), apic_maps[dom].map);
}
diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
index 1a8b3ad493af..a7aa6eff4ae5 100644
--- a/arch/x86/kernel/cpu/topology_amd.c
+++ b/arch/x86/kernel/cpu/topology_amd.c
@@ -29,11 +29,21 @@ static bool parse_8000_0008(struct topo_scan *tscan)
if (!sft)
sft = get_count_order(ecx.cpu_nthreads + 1);
- topology_set_dom(tscan, TOPO_SMT_DOMAIN, sft, ecx.cpu_nthreads + 1);
+ /*
+ * cpu_nthreads describes the number of threads in the package
+ * sft is the number of APIC ID bits per package
+ *
+ * As the number of actual threads per core is not described in
+ * this leaf, just set the CORE domain shift and let the later
+ * parsers set SMT shift. Assume one thread per core by default
+ * which is correct if there are no other CPUID leafs to parse.
+ */
+ topology_update_dom(tscan, TOPO_SMT_DOMAIN, 0, 1);
+ topology_set_dom(tscan, TOPO_CORE_DOMAIN, sft, ecx.cpu_nthreads + 1);
return true;
}
-static void store_node(struct topo_scan *tscan, unsigned int nr_nodes, u16 node_id)
+static void store_node(struct topo_scan *tscan, u16 nr_nodes, u16 node_id)
{
/*
* Starting with Fam 17h the DIE domain could probably be used to
@@ -73,12 +83,14 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
tscan->c->topo.initial_apicid = leaf.ext_apic_id;
/*
- * If leaf 0xb is available, then SMT shift is set already. If not
- * take it from ecx.threads_per_core and use topo_update_dom() -
- * topology_set_dom() would propagate and overwrite the already
- * propagated CORE level.
+ * If leaf 0xb is available, then the domain shifts are set
+ * already and there is nothing to do here.
*/
if (!has_0xb) {
+ /*
+ * Leaf 0x80000008 set the CORE domain shift already.
+ * Update the SMT domain, but do not propagate it.
+ */
unsigned int nthreads = leaf.core_nthreads + 1;
topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
@@ -109,13 +121,13 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
static bool parse_fam10h_node_id(struct topo_scan *tscan)
{
- struct {
- union {
+ union {
+ struct {
u64 node_id : 3,
nodes_per_pkg : 3,
unused : 58;
- u64 msr;
};
+ u64 msr;
} nid;
if (!boot_cpu_has(X86_FEATURE_NODEID_MSR))
@@ -135,6 +147,26 @@ static void legacy_set_llc(struct topo_scan *tscan)
tscan->c->topo.llc_id = apicid >> tscan->dom_shifts[TOPO_CORE_DOMAIN];
}
+static void topoext_fixup(struct topo_scan *tscan)
+{
+ struct cpuinfo_x86 *c = tscan->c;
+ u64 msrval;
+
+ /* Try to re-enable TopologyExtensions if switched off by BIOS */
+ if (cpu_has(c, X86_FEATURE_TOPOEXT) || c->x86_vendor != X86_VENDOR_AMD ||
+ c->x86 != 0x15 || c->x86_model < 0x10 || c->x86_model > 0x6f)
+ return;
+
+ if (msr_set_bit(0xc0011005, 54) <= 0)
+ return;
+
+ rdmsrl(0xc0011005, msrval);
+ if (msrval & BIT_64(54)) {
+ set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+ }
+}
+
static void parse_topology_amd(struct topo_scan *tscan)
{
bool has_0xb = false;
@@ -164,6 +196,7 @@ static void parse_topology_amd(struct topo_scan *tscan)
void cpu_parse_topology_amd(struct topo_scan *tscan)
{
tscan->amd_nodes_per_pkg = 1;
+ topoext_fixup(tscan);
parse_topology_amd(tscan);
if (tscan->amd_nodes_per_pkg > 1)
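The parse_fam10h_node_id() change swaps the nesting so the union is outermost: only then do the bitfields and the raw msr member alias the same storage (with a struct outermost, msr would follow the bitfields as a second member). A standalone illustration, assuming GCC/Clang's x86 bitfield layout (low bits first):

#include <stdint.h>
#include <stdio.h>

union nid {
	struct {
		uint64_t node_id	: 3,
			 nodes_per_pkg	: 3,
			 unused		: 58;
	};
	uint64_t msr;	/* aliases the bitfields above */
};

_Static_assert(sizeof(union nid) == 8, "bitfields overlay the raw MSR value");

int main(void)
{
	union nid n = { .msr = 0x2b };	/* 0b101011: node_id=3, npp=5 */

	printf("node_id=%llu nodes_per_pkg=%llu\n",
	       (unsigned long long)n.node_id,
	       (unsigned long long)n.nodes_per_pkg);
	return 0;
}
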
diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c
index e963344b0449..53935b4d62e3 100644
--- a/arch/x86/kernel/eisa.c
+++ b/arch/x86/kernel/eisa.c
@@ -2,6 +2,7 @@
/*
* EISA specific code
*/
+#include <linux/cc_platform.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/io.h>
@@ -12,7 +13,7 @@ static __init int eisa_bus_probe(void)
{
void __iomem *p;
- if (xen_pv_domain() && !xen_initial_domain())
+ if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return 0;
p = ioremap(0x0FFFD9, 4);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 4cadfd606e8e..7f0732bc0ccd 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -65,6 +65,7 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
+static DEFINE_PER_CPU_READ_MOSTLY(bool, async_pf_enabled);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
@@ -244,7 +245,7 @@ noinstr u32 kvm_read_and_reset_apf_flags(void)
{
u32 flags = 0;
- if (__this_cpu_read(apf_reason.enabled)) {
+ if (__this_cpu_read(async_pf_enabled)) {
flags = __this_cpu_read(apf_reason.flags);
__this_cpu_write(apf_reason.flags, 0);
}
@@ -295,7 +296,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
inc_irq_stat(irq_hv_callback_count);
- if (__this_cpu_read(apf_reason.enabled)) {
+ if (__this_cpu_read(async_pf_enabled)) {
token = __this_cpu_read(apf_reason.token);
kvm_async_pf_task_wake(token);
__this_cpu_write(apf_reason.token, 0);
@@ -362,7 +363,7 @@ static void kvm_guest_cpu_init(void)
wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
- __this_cpu_write(apf_reason.enabled, 1);
+ __this_cpu_write(async_pf_enabled, true);
pr_debug("setup async PF for cpu %d\n", smp_processor_id());
}
@@ -383,11 +384,11 @@ static void kvm_guest_cpu_init(void)
static void kvm_pv_disable_apf(void)
{
- if (!__this_cpu_read(apf_reason.enabled))
+ if (!__this_cpu_read(async_pf_enabled))
return;
wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
- __this_cpu_write(apf_reason.enabled, 0);
+ __this_cpu_write(async_pf_enabled, false);
pr_debug("disable async PF for cpu %d\n", smp_processor_id());
}
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 9a5b372c706f..ed163c8c8604 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -580,7 +580,7 @@ EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm_vmx);
static char *nmi_check_stall_msg[] = {
/* */
-/* +--------- nsp->idt_seq_snap & 0x1: CPU is in NMI handler. */
+/* +--------- nmi_seq & 0x1: CPU is currently in NMI handler. */
/* | +------ cpu_is_offline(cpu) */
/* | | +--- nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls): */
/* | | | NMI handler has been invoked. */
@@ -628,22 +628,26 @@ void nmi_backtrace_stall_check(const struct cpumask *btp)
nmi_seq = READ_ONCE(nsp->idt_nmi_seq);
if (nsp->idt_nmi_seq_snap + 1 == nmi_seq && (nmi_seq & 0x1)) {
msgp = "CPU entered NMI handler function, but has not exited";
- } else if ((nsp->idt_nmi_seq_snap & 0x1) != (nmi_seq & 0x1)) {
- msgp = "CPU is handling NMIs";
- } else {
- idx = ((nsp->idt_seq_snap & 0x1) << 2) |
+ } else if (nsp->idt_nmi_seq_snap == nmi_seq ||
+ nsp->idt_nmi_seq_snap + 1 == nmi_seq) {
+ idx = ((nmi_seq & 0x1) << 2) |
(cpu_is_offline(cpu) << 1) |
(nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls));
msgp = nmi_check_stall_msg[idx];
if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1))
modp = ", but OK because ignore_nmis was set";
- if (nmi_seq & 0x1)
- msghp = " (CPU currently in NMI handler function)";
- else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
+ if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
msghp = " (CPU exited one NMI handler function)";
+ else if (nmi_seq & 0x1)
+ msghp = " (CPU currently in NMI handler function)";
+ else
+ msghp = " (CPU was never in an NMI handler function)";
+ } else {
+ msgp = "CPU is handling NMIs";
}
- pr_alert("%s: CPU %d: %s%s%s, last activity: %lu jiffies ago.\n",
- __func__, cpu, msgp, modp, msghp, j - READ_ONCE(nsp->recv_jiffies));
+ pr_alert("%s: CPU %d: %s%s%s\n", __func__, cpu, msgp, modp, msghp);
+ pr_alert("%s: last activity: %lu jiffies ago.\n",
+ __func__, j - READ_ONCE(nsp->recv_jiffies));
}
}
diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c
index 319fef37d9dc..cc2c34ba7228 100644
--- a/arch/x86/kernel/probe_roms.c
+++ b/arch/x86/kernel/probe_roms.c
@@ -203,16 +203,6 @@ void __init probe_roms(void)
unsigned char c;
int i;
- /*
- * The ROM memory range is not part of the e820 table and is therefore not
- * pre-validated by BIOS. The kernel page table maps the ROM region as encrypted
- * memory, and SNP requires encrypted memory to be validated before access.
- * Do that here.
- */
- snp_prep_memory(video_rom_resource.start,
- ((system_rom_resource.end + 1) - video_rom_resource.start),
- SNP_PAGE_STATE_PRIVATE);
-
/* video rom */
upper = adapter_rom_resources[0].start;
for (start = video_rom_resource.start; start < upper; start += 2048) {
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 7062b84dd467..6d3d20e3e43a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -139,7 +139,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
log_lvl, d3, d6, d7);
}
- if (cpu_feature_enabled(X86_FEATURE_OSPKE))
+ if (cr4 & X86_CR4_PKE)
printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ef206500ed6f..e125e059e2c4 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -9,7 +9,6 @@
#include <linux/console.h>
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
-#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/ima.h>
#include <linux/init_ohci1394_dma.h>
@@ -36,6 +35,7 @@
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
#include <asm/cacheinfo.h>
+#include <asm/coco.h>
#include <asm/cpu.h>
#include <asm/efi.h>
#include <asm/gart.h>
@@ -902,7 +902,7 @@ void __init setup_arch(char **cmdline_p)
efi_init();
reserve_ibft_region();
- dmi_setup();
+ x86_init.resources.dmi_setup();
/*
* VMware detection requires dmi to be available, so this
@@ -992,6 +992,7 @@ void __init setup_arch(char **cmdline_p)
* memory size.
*/
mem_encrypt_setup_arch();
+ cc_random_init();
efi_fake_memmap();
efi_find_mirror();
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 8b04958da5e7..b4f8fa0f722c 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -1203,12 +1203,14 @@ static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
break;
case SVM_EXIT_MONITOR:
- if (opcode == 0x010f && modrm == 0xc8)
+ /* MONITOR and MONITORX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa))
return ES_OK;
break;
case SVM_EXIT_MWAIT:
- if (opcode == 0x010f && modrm == 0xc9)
+ /* MWAIT and MWAITX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb))
return ES_OK;
break;
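The extra modrm values cover AMD's MONITORX/MWAITX: MONITOR is 0F 01 C8, MWAIT is 0F 01 C9, MONITORX is 0F 01 FA and MWAITX is 0F 01 FB, and each Intel/AMD pair raises the same #VC exit code. A table-driven restatement of the widened check (the exit-code values are illustrative stand-ins for SVM_EXIT_MONITOR/SVM_EXIT_MWAIT):

#include <stdbool.h>

enum { EXIT_MONITOR = 0x8a, EXIT_MWAIT = 0x8b };

static const struct {
	unsigned int exit_code;
	unsigned char modrm[2];
} matches[] = {
	{ EXIT_MONITOR, { 0xc8, 0xfa } },	/* MONITOR, MONITORX */
	{ EXIT_MWAIT,   { 0xc9, 0xfb } },	/* MWAIT,   MWAITX   */
};

static bool opcode_ok(unsigned int exit_code, unsigned int opcode,
		      unsigned char modrm)
{
	if (opcode != 0x010f)	/* all four share the 0F 01 opcode */
		return false;
	for (unsigned int i = 0; i < 2; i++)
		if (matches[i].exit_code == exit_code &&
		    (matches[i].modrm[0] == modrm || matches[i].modrm[1] == modrm))
			return true;
	return false;
}

int main(void)
{
	return !(opcode_ok(EXIT_MWAIT, 0x010f, 0xfb) &&	/* MWAITX accepted */
		 !opcode_ok(EXIT_MWAIT, 0x010f, 0xc8));	/* MONITOR is not  */
}
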
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index b59b09c2f284..38ad066179d8 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
+#include <linux/dmi.h>
#include <uapi/linux/sev-guest.h>
#include <asm/init.h>
@@ -795,21 +796,6 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}
-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
-{
- unsigned long vaddr, npages;
-
- vaddr = (unsigned long)__va(paddr);
- npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
-
- if (op == SNP_PAGE_STATE_PRIVATE)
- early_snp_set_memory_private(vaddr, paddr, npages);
- else if (op == SNP_PAGE_STATE_SHARED)
- early_snp_set_memory_shared(vaddr, paddr, npages);
- else
- WARN(1, "invalid memory op %d\n", op);
-}
-
static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
unsigned long vaddr_end, int op)
{
@@ -2136,6 +2122,17 @@ void __head __noreturn snp_abort(void)
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
+/*
+ * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
+ * enabled, as the alternative (fallback) logic for DMI probing in the legacy
+ * ROM region can cause a crash since this region is not pre-validated.
+ */
+void __init snp_dmi_setup(void)
+{
+ if (efi_enabled(EFI_CONFIG_TABLES))
+ dmi_setup();
+}
+
static void dump_cpuid_table(void)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -2287,16 +2284,6 @@ static int __init snp_init_platform_device(void)
}
device_initcall(snp_init_platform_device);
-void kdump_sev_callback(void)
-{
- /*
- * Do wbinvd() on remote CPUs when SNP is enabled in order to
- * safely do SNP_SHUTDOWN on the local CPU.
- */
- if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
- wbinvd();
-}
-
void sev_show_status(void)
{
int i;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index a42830dc151b..d5dc5a92635a 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -3,6 +3,7 @@
*
* For licencing details see kernel-base/COPYING
*/
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
@@ -66,6 +67,7 @@ struct x86_init_ops x86_init __initdata = {
.probe_roms = probe_roms,
.reserve_resources = reserve_standard_io_resources,
.memory_setup = e820__memory_setup_default,
+ .dmi_setup = dmi_setup,
},
.mpparse = {
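dmi_setup joins x86_init.resources as another overridable callback: the default stays dmi_setup(), and SEV-SNP guest init can point the hook at snp_dmi_setup() (added in arch/x86/kernel/sev.c above). A minimal model of the ops-struct override pattern, with stand-in function bodies:

#include <stdio.h>

struct resources_ops {
	void (*dmi_setup)(void);
};

static void default_dmi_setup(void)  { puts("scan DMI/SMBIOS tables"); }
static void snp_dmi_setup_stub(void) { puts("DMI only via EFI config tables"); }

static struct resources_ops res_ops = { .dmi_setup = default_dmi_setup };

int main(void)
{
	res_ops.dmi_setup();			/* default platform path */
	res_ops.dmi_setup = snp_dmi_setup_stub;	/* early platform override */
	res_ops.dmi_setup();
	return 0;
}
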
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 3aaf7e86a859..0ebdd088f28b 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -122,6 +122,7 @@ config KVM_AMD_SEV
default y
depends on KVM_AMD && X86_64
depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
+ select ARCH_HAS_CC_PLATFORM
help
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
with Encrypted State (SEV-ES) on AMD processors.
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index a88bb14266b6..addc44fc7187 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -3,11 +3,6 @@
ccflags-y += -I $(srctree)/arch/x86/kvm
ccflags-$(CONFIG_KVM_WERROR) += -Werror
-ifeq ($(CONFIG_FRAME_POINTER),y)
-OBJECT_FILES_NON_STANDARD_vmx/vmenter.o := y
-OBJECT_FILES_NON_STANDARD_svm/vmenter.o := y
-endif
-
include $(srctree)/virt/kvm/Makefile.kvm
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index adba49afb5fe..77352a4abd87 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -189,15 +189,15 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
return 0;
}
-static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
- const char *sig)
+static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
+ int nent, const char *sig)
{
struct kvm_hypervisor_cpuid cpuid = {};
struct kvm_cpuid_entry2 *entry;
u32 base;
for_each_possible_hypervisor_cpuid_base(base) {
- entry = kvm_find_cpuid_entry(vcpu, base);
+ entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
if (entry) {
u32 signature[3];
@@ -217,22 +217,29 @@ static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcp
return cpuid;
}
-static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
- struct kvm_cpuid_entry2 *entries, int nent)
+static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
+ const char *sig)
{
- u32 base = vcpu->arch.kvm_cpuid.base;
-
- if (!base)
- return NULL;
+ return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
+ vcpu->arch.cpuid_nent, sig);
+}
- return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
+static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
+ int nent, u32 kvm_cpuid_base)
+{
+ return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}
static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
- return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
- vcpu->arch.cpuid_nent);
+ u32 base = vcpu->arch.kvm_cpuid.base;
+
+ if (!base)
+ return NULL;
+
+ return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
+ vcpu->arch.cpuid_nent, base);
}
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
@@ -266,6 +273,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
int nent)
{
struct kvm_cpuid_entry2 *best;
+ struct kvm_hypervisor_cpuid kvm_cpuid;
best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
if (best) {
@@ -292,10 +300,12 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
- if (kvm_hlt_in_guest(vcpu->kvm) && best &&
- (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
- best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
+ kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
+ if (kvm_cpuid.base) {
+ best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
+ if (kvm_hlt_in_guest(vcpu->kvm) && best)
+ best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
+ }
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
@@ -366,6 +376,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_update_pv_runtime(vcpu);
+ vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
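__kvm_get_hypervisor_cpuid() walks the hypervisor CPUID base leaves (0x40000000 upward in 0x100 steps) and compares the 12-byte signature assembled from EBX/ECX/EDX. The equivalent probe from inside a guest (gcc/clang, x86 only):

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char sig[13] = { 0 };

	for (unsigned int base = 0x40000000; base < 0x40010000; base += 0x100) {
		__cpuid(base, eax, ebx, ecx, edx);
		memcpy(sig + 0, &ebx, 4);
		memcpy(sig + 4, &ecx, 4);
		memcpy(sig + 8, &edx, 4);
		if (!strcmp(sig, "KVMKVMKVM")) {	/* KVM_SIGNATURE */
			printf("KVM at base %#x\n", base);
			return 0;
		}
	}
	puts("no KVM signature");
	return 1;
}
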
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 856e3037e74f..23dbb9eb277c 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -120,6 +120,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}
+static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.is_amd_compatible;
+}
+
+static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
+{
+ return !guest_cpuid_is_amd_compatible(vcpu);
+}
+
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cf37586f0466..ebf41023be38 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2776,7 +2776,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
- if (r && lvt_type == APIC_LVTPC)
+ if (r && lvt_type == APIC_LVTPC &&
+ guest_cpuid_is_intel_compatible(apic->vcpu))
kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
return r;
}
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 992e651540e8..db007a4dffa2 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4935,7 +4935,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
context->cpu_role.base.level, is_efer_nx(context),
guest_can_use(vcpu, X86_FEATURE_GBPAGES),
is_cr4_pse(context),
- guest_cpuid_is_amd_or_hygon(vcpu));
+ guest_cpuid_is_amd_compatible(vcpu));
}
static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
@@ -5576,9 +5576,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
* that problem is swept under the rug; KVM's CPUID API is horrific and
* it's all but impossible to solve it without introducing a new API.
*/
- vcpu->arch.root_mmu.root_role.word = 0;
- vcpu->arch.guest_mmu.root_role.word = 0;
- vcpu->arch.nested_mmu.root_role.word = 0;
+ vcpu->arch.root_mmu.root_role.invalid = 1;
+ vcpu->arch.guest_mmu.root_role.invalid = 1;
+ vcpu->arch.nested_mmu.root_role.invalid = 1;
vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
@@ -7399,7 +7399,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
* by the memslot, KVM can't use a hugepage due to the
* misaligned address regardless of memory attributes.
*/
- if (gfn >= slot->base_gfn) {
+ if (gfn >= slot->base_gfn &&
+ gfn + nr_pages <= slot->base_gfn + slot->npages) {
if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
hugepage_clear_mixed(slot, gfn, level);
else
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index d078157e62aa..04c1f0957fea 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1548,17 +1548,21 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
}
}
-/*
- * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
- * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
- * If AD bits are not enabled, this will require clearing the writable bit on
- * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
- * be flushed.
- */
+static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
+{
+ /*
+ * All TDP MMU shadow pages share the same role as their root, aside
+ * from level, so it is valid to key off any shadow page to determine if
+ * write protection is needed for an entire tree.
+ */
+ return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
+}
+
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end)
{
- u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
+ const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
+ shadow_dirty_mask;
struct tdp_iter iter;
bool spte_set = false;
@@ -1573,7 +1577,7 @@ retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
- KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+ KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));
if (!(iter.old_spte & dbit))
@@ -1590,11 +1594,9 @@ retry:
}
/*
- * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
- * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
- * If AD bits are not enabled, this will require clearing the writable bit on
- * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
- * be flushed.
+ * Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
+ * memslot. Returns true if an SPTE has been changed and the TLBs need to be
+ * flushed.
*/
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
const struct kvm_memory_slot *slot)
@@ -1610,18 +1612,11 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
return spte_set;
}
-/*
- * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
- * set in mask, starting at gfn. The given memslot is expected to contain all
- * the GFNs represented by set bits in the mask. If AD bits are enabled,
- * clearing the dirty status will involve clearing the dirty bit on each SPTE
- * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
- */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t gfn, unsigned long mask, bool wrprot)
{
- u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
- shadow_dirty_mask;
+ const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
+ shadow_dirty_mask;
struct tdp_iter iter;
lockdep_assert_held_write(&kvm->mmu_lock);
@@ -1633,7 +1628,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
if (!mask)
break;
- KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+ KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));
if (iter.level > PG_LEVEL_4K ||
@@ -1659,11 +1654,9 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
}
/*
- * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
- * set in mask, starting at gfn. The given memslot is expected to contain all
- * the GFNs represented by set bits in the mask. If AD bits are enabled,
- * clearing the dirty status will involve clearing the dirty bit on each SPTE
- * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
+ * Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
+ * which a bit is set in mask, starting at gfn. The given memslot is expected to
+ * contain all the GFNs represented by set bits in the mask.
*/
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index c397b28e3d1b..a593b03c9aed 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -775,8 +775,20 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->pebs_data_cfg_mask = ~0ull;
bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
- if (vcpu->kvm->arch.enable_pmu)
- static_call(kvm_x86_pmu_refresh)(vcpu);
+ if (!vcpu->kvm->arch.enable_pmu)
+ return;
+
+ static_call(kvm_x86_pmu_refresh)(vcpu);
+
+ /*
+ * At RESET, both Intel and AMD CPUs set all enable bits for general
+ * purpose counters in IA32_PERF_GLOBAL_CTRL (so that software that
+ * was written for v1 PMUs don't unknowingly leave GP counters disabled
+ * in the global controls). Emulate that behavior when refreshing the
+ * PMU so that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
+ */
+ if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
+ pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)
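kvm_pmu_has_perf_global_ctrl() guards the new reset behavior; the value itself is just the low nr_arch_gp_counters bits. How the GENMASK_ULL arithmetic works out, with a hypothetical six-counter PMU:

#include <stdio.h>

/* Same semantics as the kernel macro: bits h..l set, as unsigned long long. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	unsigned int nr_gp = 6;	/* hypothetical counter count */

	/* GENMASK_ULL(5, 0) = 0x3f: all six GP enable bits set at reset */
	printf("PERF_GLOBAL_CTRL reset = %#llx\n",
	       GENMASK_ULL(nr_gp - 1, 0));
	return 0;
}
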
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index aadefcaa9561..2f4e155080ba 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -52,7 +52,7 @@ enum kvm_only_cpuid_leafs {
#define X86_FEATURE_IPRED_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
#define KVM_X86_FEATURE_RRSBA_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
#define X86_FEATURE_DDPD_U KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
-#define X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
+#define KVM_X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
/* CPUID level 0x80000007 (EDX). */
@@ -102,10 +102,12 @@ static const struct cpuid_reg reverse_cpuid[] = {
*/
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
+ BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_5);
BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}
@@ -126,6 +128,7 @@ static __always_inline u32 __feature_translate(int x86_feature)
KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
+ KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
default:
return x86_feature;
}
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index ae0ac12382b9..759581bb2128 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -84,9 +84,10 @@ struct enc_region {
};
/* Called with the sev_bitmap_lock held, or on shutdown */
-static int sev_flush_asids(int min_asid, int max_asid)
+static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
{
- int ret, asid, error = 0;
+ int ret, error = 0;
+ unsigned int asid;
/* Check if there are any ASIDs to reclaim before performing a flush */
asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
@@ -116,7 +117,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
}
/* Must be called with the sev_bitmap_lock held */
-static bool __sev_recycle_asids(int min_asid, int max_asid)
+static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{
if (sev_flush_asids(min_asid, max_asid))
return false;
@@ -143,8 +144,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
static int sev_asid_new(struct kvm_sev_info *sev)
{
- int asid, min_asid, max_asid, ret;
+ /*
+ * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
+ * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
+ * Note: min ASID can end up larger than the max if basic SEV support is
+ * effectively disabled by disallowing use of ASIDs for SEV guests.
+ */
+ unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
+ unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
+ unsigned int asid;
bool retry = true;
+ int ret;
+
+ if (min_asid > max_asid)
+ return -ENOTTY;
WARN_ON(sev->misc_cg);
sev->misc_cg = get_current_misc_cg();
@@ -157,12 +170,6 @@ static int sev_asid_new(struct kvm_sev_info *sev)
mutex_lock(&sev_bitmap_lock);
- /*
- * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
- * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
- */
- min_asid = sev->es_active ? 1 : min_sev_asid;
- max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
if (asid > max_asid) {
@@ -179,7 +186,8 @@ again:
mutex_unlock(&sev_bitmap_lock);
- return asid;
+ sev->asid = asid;
+ return 0;
e_uncharge:
sev_misc_cg_uncharge(sev);
put_misc_cg(sev->misc_cg);
@@ -187,7 +195,7 @@ e_uncharge:
return ret;
}
-static int sev_get_asid(struct kvm *kvm)
+static unsigned int sev_get_asid(struct kvm *kvm)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -247,21 +255,19 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_platform_init_args init_args = {0};
- int asid, ret;
+ int ret;
if (kvm->created_vcpus)
return -EINVAL;
- ret = -EBUSY;
if (unlikely(sev->active))
- return ret;
+ return -EINVAL;
sev->active = true;
sev->es_active = argp->id == KVM_SEV_ES_INIT;
- asid = sev_asid_new(sev);
- if (asid < 0)
+ ret = sev_asid_new(sev);
+ if (ret)
goto e_no_asid;
- sev->asid = asid;
init_args.probe = false;
ret = sev_platform_init(&init_args);
@@ -287,8 +293,8 @@ e_no_asid:
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
+ unsigned int asid = sev_get_asid(kvm);
struct sev_data_activate activate;
- int asid = sev_get_asid(kvm);
int ret;
/* activate ASID on the given handle */
@@ -428,7 +434,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
/* Avoid using vmalloc for smaller buffers. */
size = npages * sizeof(struct page *);
if (size > PAGE_SIZE)
- pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ pages = __vmalloc(size, GFP_KERNEL_ACCOUNT);
else
pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
@@ -2240,8 +2246,10 @@ void __init sev_hardware_setup(void)
goto out;
}
- sev_asid_count = max_sev_asid - min_sev_asid + 1;
- WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
+ if (min_sev_asid <= max_sev_asid) {
+ sev_asid_count = max_sev_asid - min_sev_asid + 1;
+ WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
+ }
sev_supported = true;
/* SEV-ES support requested? */
@@ -2272,7 +2280,9 @@ void __init sev_hardware_setup(void)
out:
if (boot_cpu_has(X86_FEATURE_SEV))
pr_info("SEV %s (ASIDs %u - %u)\n",
- sev_supported ? "enabled" : "disabled",
+ sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
+ "unusable" :
+ "disabled",
min_sev_asid, max_sev_asid);
if (boot_cpu_has(X86_FEATURE_SEV_ES))
pr_info("SEV-ES %s (ASIDs %u - %u)\n",
@@ -2320,7 +2330,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
*/
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{
- int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
+ unsigned int asid = sev_get_asid(vcpu->kvm);
/*
* Note! The address must be a kernel address, as regular page walk
@@ -2638,7 +2648,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
- int asid = sev_get_asid(svm->vcpu.kvm);
+ unsigned int asid = sev_get_asid(svm->vcpu.kvm);
/* Assign the asid allocated with this SEV guest */
svm->asid = asid;
@@ -3174,7 +3184,7 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
unsigned long pfn;
struct page *p;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
/*
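
The reworked sev_asid_new() above folds the range selection into the declarations and rejects an impossible range before charging the cgroup. A hedged standalone sketch of just that selection logic; the numeric limits here are illustrative, not real firmware values:

#include <errno.h>
#include <stdbool.h>

static unsigned int min_sev_asid = 64, max_sev_asid = 509;

static int pick_asid_range(bool es_active,
			   unsigned int *min_asid, unsigned int *max_asid)
{
	/*
	 * SEV guests draw from [min_sev_asid, max_sev_asid]; SEV-ES guests
	 * draw from [1, min_sev_asid - 1].  If firmware gives every ASID to
	 * SEV-ES, min exceeds max for plain SEV and it must be rejected.
	 */
	*min_asid = es_active ? 1 : min_sev_asid;
	*max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;
	return (*min_asid > *max_asid) ? -ENOTTY : 0;
}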
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d1a9f9951635..9aaf83c8d57d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1503,6 +1503,11 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}
+static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
+{
+ return page_address(sd->save_area) + 0x400;
+}
+
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1519,12 +1524,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
* or subsequent vmload of host save area.
*/
vmsave(sd->save_area_pa);
- if (sev_es_guest(vcpu->kvm)) {
- struct sev_es_save_area *hostsa;
- hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
-
- sev_es_prepare_switch_to_guest(hostsa);
- }
+ if (sev_es_guest(vcpu->kvm))
+ sev_es_prepare_switch_to_guest(sev_es_host_save_area(sd));
if (tsc_scaling)
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
@@ -4101,6 +4102,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
{
+ struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
struct vcpu_svm *svm = to_svm(vcpu);
guest_state_enter_irqoff();
@@ -4108,7 +4110,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
amd_clear_divider();
if (sev_es_guest(vcpu->kvm))
- __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+ __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
+ sev_es_host_save_area(sd));
else
__svm_vcpu_run(svm, spec_ctrl_intercepted);
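
The new sev_es_host_save_area() helper replaces an open-coded cast: the SEV-ES host state lives at a fixed 0x400-byte offset into the per-CPU save-area page. A minimal sketch of that wrapping, with a stand-in struct rather than the real sev_es_save_area:

struct demo_save_area { unsigned long gprs[16]; };	/* stand-in type */

static inline struct demo_save_area *demo_host_save_area(void *page_va)
{
	/* Host state sits 0x400 bytes into the per-CPU save-area page. */
	return (struct demo_save_area *)((char *)page_va + 0x400);
}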
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 7f1fbd874c45..33878efdebc8 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -698,7 +698,8 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
/* vmenter.S */
-void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
+ struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
#define DEFINE_KVM_GHCB_ACCESSORS(field) \
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 187018c424bf..a0c8eb37d3e1 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -3,6 +3,7 @@
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
+#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"
@@ -67,7 +68,7 @@
"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
-.macro RESTORE_HOST_SPEC_CTRL_BODY
+.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
/* Same for after vmexit. */
mov $MSR_IA32_SPEC_CTRL, %ecx
@@ -76,7 +77,7 @@
* Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
* if it was not intercepted during guest execution.
*/
- cmpb $0, (%_ASM_SP)
+ cmpb $0, \spec_ctrl_intercepted
jnz 998f
rdmsr
movl %eax, SVM_spec_ctrl(%_ASM_DI)
@@ -99,6 +100,7 @@
*/
SYM_FUNC_START(__svm_vcpu_run)
push %_ASM_BP
+ mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
push %r15
push %r14
@@ -268,7 +270,7 @@ SYM_FUNC_START(__svm_vcpu_run)
RET
RESTORE_GUEST_SPEC_CTRL_BODY
- RESTORE_HOST_SPEC_CTRL_BODY
+ RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
10: cmpb $0, _ASM_RIP(kvm_rebooting)
jne 2b
@@ -290,66 +292,68 @@ SYM_FUNC_START(__svm_vcpu_run)
SYM_FUNC_END(__svm_vcpu_run)
+#ifdef CONFIG_KVM_AMD_SEV
+
+#ifdef CONFIG_X86_64
+#define SEV_ES_GPRS_BASE 0x300
+#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
+#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
+#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
+#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
+#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
+#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
+#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
+#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
+#endif
+
/**
* __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
* @svm: struct vcpu_svm *
* @spec_ctrl_intercepted: bool
* @hostsa: struct sev_es_save_area * (the per-CPU host save area)
*/
SYM_FUNC_START(__svm_sev_es_vcpu_run)
- push %_ASM_BP
-#ifdef CONFIG_X86_64
- push %r15
- push %r14
- push %r13
- push %r12
-#else
- push %edi
- push %esi
-#endif
- push %_ASM_BX
+ FRAME_BEGIN
/*
- * Save variables needed after vmexit on the stack, in inverse
- * order compared to when they are needed.
+ * Save non-volatile (callee-saved) registers to the host save area.
+ * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
+ * saved on VMRUN.
*/
+ mov %rbp, SEV_ES_RBP (%rdx)
+ mov %r15, SEV_ES_R15 (%rdx)
+ mov %r14, SEV_ES_R14 (%rdx)
+ mov %r13, SEV_ES_R13 (%rdx)
+ mov %r12, SEV_ES_R12 (%rdx)
+ mov %rbx, SEV_ES_RBX (%rdx)
- /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
- push %_ASM_ARG2
-
- /* Save @svm. */
- push %_ASM_ARG1
-
-.ifnc _ASM_ARG1, _ASM_DI
/*
- * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
- * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
+ * Save volatile registers that hold arguments that are needed after
+ * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
*/
- mov %_ASM_ARG1, %_ASM_DI
-.endif
+ mov %rdi, SEV_ES_RDI (%rdx)
+ mov %rsi, SEV_ES_RSI (%rdx)
- /* Clobbers RAX, RCX, RDX. */
+ /* Clobbers RAX, RCX, RDX (@hostsa). */
RESTORE_GUEST_SPEC_CTRL
/* Get svm->current_vmcb->pa into RAX. */
- mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
- mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+ mov SVM_current_vmcb(%rdi), %rax
+ mov KVM_VMCB_pa(%rax), %rax
/* Enter guest mode */
sti
-1: vmrun %_ASM_AX
+1: vmrun %rax
2: cli
- /* Pop @svm to RDI, guest registers have been saved already. */
- pop %_ASM_DI
-
#ifdef CONFIG_MITIGATION_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
- FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+ FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif
- /* Clobbers RAX, RCX, RDX. */
+ /* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
RESTORE_HOST_SPEC_CTRL
/*
@@ -361,30 +365,17 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
*/
UNTRAIN_RET_VM
- /* "Pop" @spec_ctrl_intercepted. */
- pop %_ASM_BX
-
- pop %_ASM_BX
-
-#ifdef CONFIG_X86_64
- pop %r12
- pop %r13
- pop %r14
- pop %r15
-#else
- pop %esi
- pop %edi
-#endif
- pop %_ASM_BP
+ FRAME_END
RET
RESTORE_GUEST_SPEC_CTRL_BODY
- RESTORE_HOST_SPEC_CTRL_BODY
+ RESTORE_HOST_SPEC_CTRL_BODY %sil
-3: cmpb $0, _ASM_RIP(kvm_rebooting)
+3: cmpb $0, kvm_rebooting(%rip)
jne 2b
ud2
_ASM_EXTABLE(1b, 3b)
SYM_FUNC_END(__svm_sev_es_vcpu_run)
+#endif /* CONFIG_KVM_AMD_SEV */
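
The SEV_ES_* offsets above let the assembly stash callee-saved GPRs directly in the host save area, where hardware restores them on #VMEXIT, instead of pushing them on the stack. A sketch of the offset arithmetic; the register index and word size below are illustrative stand-ins, not the real asm-offsets values:

enum { DEMO_REGS_RBP = 5, DEMO_WORD_SIZE = 8, DEMO_GPRS_BASE = 0x300 };

#define DEMO_SEV_ES_OFF(reg) (DEMO_GPRS_BASE + (reg) * DEMO_WORD_SIZE)

/*
 * DEMO_SEV_ES_OFF(DEMO_REGS_RBP) == 0x328: the slot in the host save
 * area where RBP is written before VMRUN and reloaded on #VMEXIT.
 */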
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 88659de4d2a7..c6b4b1728006 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -735,13 +735,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
* Tracepoint for nested #vmexit because of interrupt pending
*/
TRACE_EVENT(kvm_invlpga,
- TP_PROTO(__u64 rip, int asid, u64 address),
+ TP_PROTO(__u64 rip, unsigned int asid, u64 address),
TP_ARGS(rip, asid, address),
TP_STRUCT__entry(
- __field( __u64, rip )
- __field( int, asid )
- __field( __u64, address )
+ __field( __u64, rip )
+ __field( unsigned int, asid )
+ __field( __u64, address )
),
TP_fast_assign(
@@ -750,7 +750,7 @@ TRACE_EVENT(kvm_invlpga,
__entry->address = address;
),
- TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
+ TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
__entry->rip, __entry->asid, __entry->address)
);
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 12ade343a17e..be40474de6e4 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -535,7 +535,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
perf_capabilities = vcpu_get_perf_capabilities(vcpu);
if (cpuid_model_is_consistent(vcpu) &&
(perf_capabilities & PMU_CAP_LBR_FMT))
- x86_perf_get_lbr(&lbr_desc->records);
+ memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
else
lbr_desc->records.nr = 0;
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 2bfbf758d061..f6986dee6f8c 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -275,6 +275,8 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
call vmx_spec_ctrl_restore_host
+ CLEAR_BRANCH_HISTORY_VMEXIT
+
/* Put return value in AX */
mov %_ASM_BX, %_ASM_AX
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c37a89eda90f..22411f4aff53 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -218,6 +218,8 @@ module_param(ple_window_max, uint, 0444);
int __read_mostly pt_mode = PT_MODE_SYSTEM;
module_param(pt_mode, int, S_IRUGO);
+struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;
+
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);
@@ -7862,10 +7864,9 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmx_update_exception_bitmap(vcpu);
}
-static u64 vmx_get_perf_capabilities(void)
+static __init u64 vmx_get_perf_capabilities(void)
{
u64 perf_cap = PMU_CAP_FW_WRITES;
- struct x86_pmu_lbr lbr;
u64 host_perf_cap = 0;
if (!enable_pmu)
@@ -7875,15 +7876,43 @@ static u64 vmx_get_perf_capabilities(void)
rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
- x86_perf_get_lbr(&lbr);
- if (lbr.nr)
+ x86_perf_get_lbr(&vmx_lbr_caps);
+
+ /*
+ * KVM requires LBR callstack support, as the overhead due to
+ * context switching LBRs without said support is too high.
+ * See intel_pmu_create_guest_lbr_event() for more info.
+ */
+ if (!vmx_lbr_caps.has_callstack)
+ memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
+ else if (vmx_lbr_caps.nr)
perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
}
if (vmx_pebs_supported()) {
perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
- if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
- perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+
+ /*
+ * Disallow adaptive PEBS as it is functionally broken, can be
+ * used by the guest to read *host* LBRs, and can be used to
+ * bypass userspace event filters. To correctly and safely
+ * support adaptive PEBS, KVM needs to:
+ *
+ * 1. Account for the ADAPTIVE flag when (re)programming fixed
+ * counters.
+ *
+ * 2. Gain support from perf (or take direct control of counter
+ * programming) to support events without adaptive PEBS
+ * enabled for the hardware counter.
+ *
+ * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
+ * adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
+ *
+ * 4. Document which PMU events are effectively exposed to the
+ * guest via adaptive PEBS, and make adaptive PEBS mutually
+ * exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
+ */
+ perf_cap &= ~PERF_CAP_PEBS_BASELINE;
}
return perf_cap;
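
A hedged sketch of the masking pattern the hunk above ends with: PERF_CAP_PEBS_BASELINE (adaptive PEBS) is now cleared unconditionally rather than only on old PEBS formats. The bit positions here are illustrative:

#include <stdint.h>

#define DEMO_PEBS_MASK	   0xf0ull		/* illustrative */
#define DEMO_PEBS_BASELINE (1ull << 14)		/* illustrative */

static uint64_t demo_filter_perf_cap(uint64_t host_cap)
{
	uint64_t cap = host_cap & DEMO_PEBS_MASK;

	cap &= ~DEMO_PEBS_BASELINE;	/* never advertise adaptive PEBS */
	return cap;
}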
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 65786dbe7d60..90f9e4434646 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -15,6 +15,7 @@
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"
+#include "../mmu.h"
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
@@ -109,6 +110,8 @@ struct lbr_desc {
bool msr_passthrough;
};
+extern struct x86_pmu_lbr vmx_lbr_caps;
+
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -719,7 +722,8 @@ static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
if (!enable_ept)
return true;
- return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+ return allow_smaller_maxphyaddr &&
+ cpuid_maxphyaddr(vcpu) < kvm_get_shadow_phys_bits();
}
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 47d9f03b7778..91478b769af0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1621,7 +1621,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
- ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR)
+ ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
static u64 kvm_get_arch_capabilities(void)
{
@@ -3470,7 +3470,7 @@ static bool is_mci_status_msr(u32 msr)
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{
/* McStatusWrEn enabled? */
- if (guest_cpuid_is_amd_or_hygon(vcpu))
+ if (guest_cpuid_is_amd_compatible(vcpu))
return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
return false;
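
For reference, the check gated by guest_cpuid_is_amd_compatible() reads bit 18 (McStatusWrEn) of the guest's HWCR MSR. A standalone sketch of that predicate, with the bit number taken from the hunk above:

#include <stdbool.h>
#include <stdint.h>

static bool demo_mci_status_writable(uint64_t msr_hwcr, bool amd_compatible)
{
	/* McStatusWrEn is HWCR bit 18; only meaningful on AMD-compatible CPUs. */
	return amd_compatible && (msr_hwcr & (1ull << 18));
}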
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 721b528da9ac..391059b2c6fb 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -163,6 +163,7 @@ SYM_CODE_START_NOALIGN(srso_alias_untrain_ret)
lfence
jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
.popsection
.pushsection .text..__x86.rethunk_safe
@@ -224,10 +225,16 @@ SYM_CODE_START(srso_return_thunk)
SYM_CODE_END(srso_return_thunk)
#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
-#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
#else /* !CONFIG_MITIGATION_SRSO */
+/* Dummy for the alternative in CALL_UNTRAIN_RET. */
+SYM_CODE_START(srso_alias_untrain_ret)
+ ANNOTATE_UNRET_SAFE
+ ANNOTATE_NOENDBR
+ ret
+ int3
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
#define JMP_SRSO_UNTRAIN_RET "ud2"
-#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
#endif /* CONFIG_MITIGATION_SRSO */
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
@@ -319,9 +326,7 @@ SYM_FUNC_END(retbleed_untrain_ret)
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
SYM_FUNC_START(entry_untrain_ret)
- ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \
- JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO, \
- JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
+ ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)
@@ -377,8 +382,15 @@ SYM_FUNC_END(call_depth_return_thunk)
SYM_CODE_START(__x86_return_thunk)
UNWIND_HINT_FUNC
ANNOTATE_NOENDBR
+#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || \
+ defined(CONFIG_MITIGATION_SRSO) || \
+ defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)
ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \
"jmp warn_thunk_thunk", X86_FEATURE_ALWAYS
+#else
+ ANNOTATE_UNRET_SAFE
+ ret
+#endif
int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index a204a332c71f..968d7005f4a7 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
for (; addr < end; addr = next) {
pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd;
- bool use_gbpage;
next = (addr & PUD_MASK) + PUD_SIZE;
if (next > end)
next = end;
- /* if this is already a gbpage, this portion is already mapped */
- if (pud_leaf(*pud))
- continue;
-
- /* Is using a gbpage allowed? */
- use_gbpage = info->direct_gbpages;
-
- /* Don't use gbpage if it maps more than the requested region. */
- /* at the beginning: */
- use_gbpage &= ((addr & ~PUD_MASK) == 0);
- /* ... or at the end: */
- use_gbpage &= ((next & ~PUD_MASK) == 0);
-
- /* Never overwrite existing mappings */
- use_gbpage &= !pud_present(*pud);
-
- if (use_gbpage) {
+ if (info->direct_gbpages) {
pud_t pudval;
+ if (pud_present(*pud))
+ continue;
+
+ addr &= PUD_MASK;
pudval = __pud((addr - info->offset) | info->page_flag);
set_pud(pud, pudval);
continue;
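
The simplified gbpage path above rounds the address down with PUD_MASK and maps the whole 1 GiB region with one leaf entry, trusting the caller to request properly aligned ranges. A small runnable demonstration of that alignment arithmetic:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PUD_SHIFT 30
#define DEMO_PUD_SIZE  (1ull << DEMO_PUD_SHIFT)
#define DEMO_PUD_MASK  (~(DEMO_PUD_SIZE - 1))

int main(void)
{
	uint64_t addr = 0x40123000ull;		/* somewhere mid-region */

	printf("one gbpage maps 0x%llx..0x%llx\n",
	       (unsigned long long)(addr & DEMO_PUD_MASK),
	       (unsigned long long)((addr & DEMO_PUD_MASK) + DEMO_PUD_SIZE - 1));
	return 0;				/* 0x40000000..0x7fffffff */
}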
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 70b91de2e053..422602f6039b 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -492,6 +492,24 @@ void __init sme_early_init(void)
*/
if (sev_status & MSR_AMD64_SEV_ENABLED)
ia32_disable();
+
+ /*
+ * Override init functions that scan the ROM region in SEV-SNP guests,
+ * as this memory is not pre-validated and accessing it would crash.
+ */
+ if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
+ x86_init.mpparse.find_mptable = x86_init_noop;
+ x86_init.pci.init_irq = x86_init_noop;
+ x86_init.resources.probe_roms = x86_init_noop;
+
+ /*
+ * DMI setup behavior for SEV-SNP guests depends on
+ * efi_enabled(EFI_CONFIG_TABLES), but the EFI config
+ * tables have not been parsed yet. snp_dmi_setup() will
+ * run after that parsing has happened.
+ */
+ x86_init.resources.dmi_setup = snp_dmi_setup;
+ }
}
void __init mem_encrypt_free_decrypted_mem(void)
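
The hunk above uses the standard x86_init override idiom: hooks that would touch unvalidated ROM memory are swapped for no-ops at early boot. A hedged sketch of the idiom with stand-in ops, not the real x86_init table:

static void demo_noop(void) { }

struct demo_init_ops {
	void (*find_mptable)(void);
	void (*probe_roms)(void);
};

static struct demo_init_ops demo_init;	/* assume defaults installed earlier */

static void demo_snp_overrides(int snp_enabled)
{
	if (!snp_enabled)
		return;
	/* ROM scans would touch unvalidated memory under SNP: stub them. */
	demo_init.find_mptable = demo_noop;
	demo_init.probe_roms   = demo_noop;
}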
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 104544359d69..025fd7ea5d69 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -24,6 +24,7 @@
#include <linux/memblock.h>
#include <linux/init.h>
+#include <asm/pgtable_areas.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 0d72183b5dd0..36b603d0cdde 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -947,6 +947,38 @@ static void free_pfn_range(u64 paddr, unsigned long size)
memtype_free(paddr, paddr + size);
}
+static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
+ pgprot_t *pgprot)
+{
+ unsigned long prot;
+
+ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));
+
+ /*
+ * We need the starting PFN and cachemode used for track_pfn_remap()
+ * that covered the whole VMA. For most mappings, we can obtain that
+ * information from the page tables. For COW mappings, we might now
+ * suddenly have anon folios mapped and follow_phys() will fail.
+ *
+ * Fall back to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
+ * detect the PFN. If we need the cachemode as well, we're out of luck
+ * for now and have to fail fork().
+ */
+ if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
+ if (pgprot)
+ *pgprot = __pgprot(prot);
+ return 0;
+ }
+ if (is_cow_mapping(vma->vm_flags)) {
+ if (pgprot)
+ return -EINVAL;
+ *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
+ return 0;
+ }
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
/*
* track_pfn_copy is called when a vma covering the pfnmap gets
* copied through copy_page_range().
@@ -957,20 +989,13 @@ static void free_pfn_range(u64 paddr, unsigned long size)
int track_pfn_copy(struct vm_area_struct *vma)
{
resource_size_t paddr;
- unsigned long prot;
unsigned long vma_size = vma->vm_end - vma->vm_start;
pgprot_t pgprot;
if (vma->vm_flags & VM_PAT) {
- /*
- * reserve the whole chunk covered by vma. We need the
- * starting address and protection from pte.
- */
- if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
- WARN_ON_ONCE(1);
+ if (get_pat_info(vma, &paddr, &pgprot))
return -EINVAL;
- }
- pgprot = __pgprot(prot);
+ /* reserve the whole chunk covered by vma. */
return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
}
@@ -1045,7 +1070,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size, bool mm_wr_locked)
{
resource_size_t paddr;
- unsigned long prot;
if (vma && !(vma->vm_flags & VM_PAT))
return;
@@ -1053,11 +1077,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
/* free the chunk starting from pfn or the whole chunk */
paddr = (resource_size_t)pfn << PAGE_SHIFT;
if (!paddr && !size) {
- if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
- WARN_ON_ONCE(1);
+ if (get_pat_info(vma, &paddr, NULL))
return;
- }
-
size = vma->vm_end - vma->vm_start;
}
free_pfn_range(paddr, size);
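
The COW fallback in get_pat_info() recovers only the starting physical address, because remap_pfn_range() stores the first mapped PFN in vma->vm_pgoff. The conversion is a single shift; a trivial sketch:

#include <stdint.h>

#define DEMO_PAGE_SHIFT 12

static inline uint64_t demo_pgoff_to_paddr(uint64_t vm_pgoff)
{
	return vm_pgoff << DEMO_PAGE_SHIFT;	/* pgoff 0x12345 -> 0x12345000 */
}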
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a7ba8e178645..df5fac428408 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -480,7 +480,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
OPTIMIZER_HIDE_VAR(func);
- x86_call_depth_emit_accounting(pprog, func);
+ ip += x86_call_depth_emit_accounting(pprog, func, ip);
return emit_patch(pprog, func, ip, 0xE8);
}
@@ -1972,20 +1972,17 @@ populate_extable:
/* call */
case BPF_JMP | BPF_CALL: {
- int offs;
+ u8 *ip = image + addrs[i - 1];
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
- if (!imm32)
- return -EINVAL;
- offs = 7 + x86_call_depth_emit_accounting(&prog, func);
- } else {
- if (!imm32)
- return -EINVAL;
- offs = x86_call_depth_emit_accounting(&prog, func);
+ ip += 7;
}
- if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+ if (!imm32)
+ return -EINVAL;
+ ip += x86_call_depth_emit_accounting(&prog, func, ip);
+ if (emit_call(&prog, func, ip))
return -EINVAL;
break;
}
@@ -2835,7 +2832,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
* Direct-call fentry stub, as such it needs accounting for the
* __fentry__ call.
*/
- x86_call_depth_emit_accounting(&prog, NULL);
+ x86_call_depth_emit_accounting(&prog, NULL, image);
}
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
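
The JIT change above threads ip through every emission step so the rel32 displacement of the eventual call is computed from the instruction that follows the call-depth accounting bytes. A hedged sketch of that bookkeeping; the byte counts are illustrative:

#include <stddef.h>

static size_t demo_emit_accounting(void)
{
	return 5;			/* pretend 5 accounting bytes were emitted */
}

static long demo_call_rel32(char *ip, char *func)
{
	ip += demo_emit_accounting();	/* skip the bytes emitted before the call */
	return (long)(func - (ip + 5));	/* rel32 for a 5-byte near call */
}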
diff --git a/arch/x86/virt/Makefile b/arch/x86/virt/Makefile
index 1e36502cd738..ea343fc392dc 100644
--- a/arch/x86/virt/Makefile
+++ b/arch/x86/virt/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += vmx/
+obj-y += svm/ vmx/
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index cffe1157a90a..ab0e8448bb6e 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -77,7 +77,7 @@ static int __mfd_enable(unsigned int cpu)
{
u64 val;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
rdmsrl(MSR_AMD64_SYSCFG, val);
@@ -98,7 +98,7 @@ static int __snp_enable(unsigned int cpu)
{
u64 val;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
rdmsrl(MSR_AMD64_SYSCFG, val);
@@ -174,11 +174,11 @@ static int __init snp_rmptable_init(void)
u64 rmptable_size;
u64 val;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
if (!amd_iommu_snp_en)
- return 0;
+ goto nosnp;
if (!probed_rmp_size)
goto nosnp;
@@ -225,7 +225,7 @@ skip_enable:
return 0;
nosnp:
- setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
return -ENOSYS;
}
@@ -246,7 +246,7 @@ static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
{
struct rmpentry *large_entry, *entry;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return ERR_PTR(-ENODEV);
entry = get_rmpentry(pfn);
@@ -363,7 +363,7 @@ int psmash(u64 pfn)
unsigned long paddr = pfn << PAGE_SHIFT;
int ret;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return -ENODEV;
if (!pfn_valid(pfn))
@@ -472,7 +472,7 @@ static int rmpupdate(u64 pfn, struct rmp_state *state)
unsigned long paddr = pfn << PAGE_SHIFT;
int ret, level;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return -ENODEV;
level = RMP_TO_PG_LEVEL(state->pagesize);
@@ -558,3 +558,13 @@ void snp_leak_pages(u64 pfn, unsigned int npages)
spin_unlock(&snp_leaked_pages_list_lock);
}
EXPORT_SYMBOL_GPL(snp_leak_pages);
+
+void kdump_sev_callback(void)
+{
+ /*
+ * Do wbinvd() on remote CPUs when SNP is enabled in order to
+ * safely do SNP_SHUTDOWN on the local CPU.
+ */
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ wbinvd();
+}
diff --git a/block/bdev.c b/block/bdev.c
index 7a5f611c3d2e..da2a167a4d08 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -583,9 +583,6 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder,
mutex_unlock(&bdev->bd_holder_lock);
bd_clear_claiming(whole, holder);
mutex_unlock(&bdev_lock);
-
- if (hops && hops->get_holder)
- hops->get_holder(holder);
}
/**
@@ -608,7 +605,6 @@ EXPORT_SYMBOL(bd_abort_claiming);
static void bd_end_claim(struct block_device *bdev, void *holder)
{
struct block_device *whole = bdev_whole(bdev);
- const struct blk_holder_ops *hops = bdev->bd_holder_ops;
bool unblock = false;
/*
@@ -631,9 +627,6 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
whole->bd_holder = NULL;
mutex_unlock(&bdev_lock);
- if (hops && hops->put_holder)
- hops->put_holder(holder);
-
/*
* If this was the last claim, remove holder link and unblock evpoll if
* it was a write holder.
@@ -652,6 +645,14 @@ static void blkdev_flush_mapping(struct block_device *bdev)
bdev_write_inode(bdev);
}
+static void blkdev_put_whole(struct block_device *bdev)
+{
+ if (atomic_dec_and_test(&bdev->bd_openers))
+ blkdev_flush_mapping(bdev);
+ if (bdev->bd_disk->fops->release)
+ bdev->bd_disk->fops->release(bdev->bd_disk);
+}
+
static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
@@ -670,20 +671,21 @@ static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
if (!atomic_read(&bdev->bd_openers))
set_init_blocksize(bdev);
- if (test_bit(GD_NEED_PART_SCAN, &disk->state))
- bdev_disk_changed(disk, false);
atomic_inc(&bdev->bd_openers);
+ if (test_bit(GD_NEED_PART_SCAN, &disk->state)) {
+ /*
+ * Only return scanning errors if we are called from contexts
+ * that explicitly want them, e.g. the BLKRRPART ioctl.
+ */
+ ret = bdev_disk_changed(disk, false);
+ if (ret && (mode & BLK_OPEN_STRICT_SCAN)) {
+ blkdev_put_whole(bdev);
+ return ret;
+ }
+ }
return 0;
}
-static void blkdev_put_whole(struct block_device *bdev)
-{
- if (atomic_dec_and_test(&bdev->bd_openers))
- blkdev_flush_mapping(bdev);
- if (bdev->bd_disk->fops->release)
- bdev->bd_disk->fops->release(bdev->bd_disk);
-}
-
static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
struct gendisk *disk = part->bd_disk;
@@ -776,17 +778,17 @@ void blkdev_put_no_open(struct block_device *bdev)
static bool bdev_writes_blocked(struct block_device *bdev)
{
- return bdev->bd_writers == -1;
+ return bdev->bd_writers < 0;
}
static void bdev_block_writes(struct block_device *bdev)
{
- bdev->bd_writers = -1;
+ bdev->bd_writers--;
}
static void bdev_unblock_writes(struct block_device *bdev)
{
- bdev->bd_writers = 0;
+ bdev->bd_writers++;
}
static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
@@ -813,6 +815,11 @@ static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
bdev->bd_writers++;
}
+static inline bool bdev_unclaimed(const struct file *bdev_file)
+{
+ return bdev_file->private_data == BDEV_I(bdev_file->f_mapping->host);
+}
+
static void bdev_yield_write_access(struct file *bdev_file)
{
struct block_device *bdev;
@@ -820,14 +827,15 @@ static void bdev_yield_write_access(struct file *bdev_file)
if (bdev_allow_write_mounted)
return;
+ if (bdev_unclaimed(bdev_file))
+ return;
+
bdev = file_bdev(bdev_file);
- /* Yield exclusive or shared write access. */
- if (bdev_file->f_mode & FMODE_WRITE) {
- if (bdev_writes_blocked(bdev))
- bdev_unblock_writes(bdev);
- else
- bdev->bd_writers--;
- }
+
+ if (bdev_file->f_mode & FMODE_WRITE_RESTRICTED)
+ bdev_unblock_writes(bdev);
+ else if (bdev_file->f_mode & FMODE_WRITE)
+ bdev->bd_writers--;
}
/**
@@ -874,7 +882,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
goto abort_claiming;
ret = -EBUSY;
if (!bdev_may_open(bdev, mode))
- goto abort_claiming;
+ goto put_module;
if (bdev_is_partition(bdev))
ret = blkdev_get_part(bdev, mode);
else
@@ -907,6 +915,8 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
bdev_file->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;
if (bdev_nowait(bdev))
bdev_file->f_mode |= FMODE_NOWAIT;
+ if (mode & BLK_OPEN_RESTRICT_WRITES)
+ bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
bdev_file->f_mapping = bdev->bd_inode->i_mapping;
bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
bdev_file->private_data = holder;
@@ -1012,6 +1022,20 @@ struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
}
EXPORT_SYMBOL(bdev_file_open_by_path);
+static inline void bd_yield_claim(struct file *bdev_file)
+{
+ struct block_device *bdev = file_bdev(bdev_file);
+ void *holder = bdev_file->private_data;
+
+ lockdep_assert_held(&bdev->bd_disk->open_mutex);
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(holder)))
+ return;
+
+ if (!bdev_unclaimed(bdev_file))
+ bd_end_claim(bdev, holder);
+}
+
void bdev_release(struct file *bdev_file)
{
struct block_device *bdev = file_bdev(bdev_file);
@@ -1036,7 +1060,7 @@ void bdev_release(struct file *bdev_file)
bdev_yield_write_access(bdev_file);
if (holder)
- bd_end_claim(bdev, holder);
+ bd_yield_claim(bdev_file);
/*
* Trigger event checking and tell drivers to flush MEDIA_CHANGE
@@ -1057,6 +1081,39 @@ put_no_open:
}
/**
+ * bdev_fput - yield claim to the block device and put the file
+ * @bdev_file: open block device
+ *
+ * Yield the claim on the block device and put the file. This ensures
+ * that the block device can be reclaimed before the file is closed,
+ * which is a deferred operation.
+ */
+void bdev_fput(struct file *bdev_file)
+{
+ if (WARN_ON_ONCE(bdev_file->f_op != &def_blk_fops))
+ return;
+
+ if (bdev_file->private_data) {
+ struct block_device *bdev = file_bdev(bdev_file);
+ struct gendisk *disk = bdev->bd_disk;
+
+ mutex_lock(&disk->open_mutex);
+ bdev_yield_write_access(bdev_file);
+ bd_yield_claim(bdev_file);
+ /*
+ * Tell release that we already gave up our hold on the
+ * device and, if write restrictions apply, that we already
+ * gave up write access to it.
+ */
+ bdev_file->private_data = BDEV_I(bdev_file->f_mapping->host);
+ mutex_unlock(&disk->open_mutex);
+ }
+
+ fput(bdev_file);
+}
+EXPORT_SYMBOL(bdev_fput);
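
A hedged sketch of how a caller might pair the open with the new helper; this assumes kernel context, a hypothetical sb holder, and trims all error handling:

struct file *bdev_file;

bdev_file = bdev_file_open_by_path("/dev/vda1",
				   BLK_OPEN_READ | BLK_OPEN_WRITE |
				   BLK_OPEN_RESTRICT_WRITES,
				   sb /* hypothetical holder */, NULL);
if (!IS_ERR(bdev_file)) {
	/* ... I/O via file_bdev(bdev_file) ... */
	bdev_fput(bdev_file);	/* claim dropped now, not at deferred fput */
}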
+
+/**
* lookup_bdev() - Look up a struct block_device by name.
* @pathname: Name of the block device in the filesystem.
* @dev: Pointer to the block device's dev_t, if found.
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index bdbb557feb5a..059467086b13 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1409,6 +1409,12 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
return 0;
}
+void blkg_init_queue(struct request_queue *q)
+{
+ INIT_LIST_HEAD(&q->blkg_list);
+ mutex_init(&q->blkcg_mutex);
+}
+
int blkcg_init_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
@@ -1416,9 +1422,6 @@ int blkcg_init_disk(struct gendisk *disk)
bool preloaded;
int ret;
- INIT_LIST_HEAD(&q->blkg_list);
- mutex_init(&q->blkcg_mutex);
-
new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
if (!new_blkg)
return -ENOMEM;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 78b74106bf10..90b3959d88cf 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -189,6 +189,7 @@ struct blkcg_policy {
extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;
+void blkg_init_queue(struct request_queue *q);
int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);
@@ -482,6 +483,7 @@ struct blkcg {
};
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline void blkg_init_queue(struct request_queue *q) { }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
diff --git a/block/blk-core.c b/block/blk-core.c
index a16b5abdbbf5..b795ac177281 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -442,6 +442,8 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
init_waitqueue_head(&q->mq_freeze_wq);
mutex_init(&q->mq_freeze_lock);
+ blkg_init_queue(q);
+
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
* See blk_register_queue() for details.
@@ -1195,6 +1197,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
if (unlikely(!rq_list_empty(plug->cached_rq)))
blk_mq_free_plug_rqs(plug);
+ plug->cur_ktime = 0;
current->flags &= ~PF_BLOCK_TS;
}
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 9a85bfbbc45a..690ca99dfaca 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1347,7 +1347,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
{
struct ioc *ioc = iocg->ioc;
struct blkcg_gq *blkg = iocg_to_blkg(iocg);
- u64 tdelta, delay, new_delay;
+ u64 tdelta, delay, new_delay, shift;
s64 vover, vover_pct;
u32 hwa;
@@ -1362,8 +1362,9 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
/* calculate the current delay in effect - 1/2 every second */
tdelta = now->now - iocg->delay_at;
- if (iocg->delay)
- delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
+ shift = div64_u64(tdelta, USEC_PER_SEC);
+ if (iocg->delay && shift < BITS_PER_LONG)
+ delay = iocg->delay >> shift;
else
delay = 0;
@@ -1438,8 +1439,11 @@ static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
lockdep_assert_held(&iocg->ioc->lock);
lockdep_assert_held(&iocg->waitq.lock);
- /* make sure that nobody messed with @iocg */
- WARN_ON_ONCE(list_empty(&iocg->active_list));
+ /*
+ * make sure that nobody messed with @iocg. Check iocg->pd.online
+ * to avoid a spurious warning when removing a blkcg or disk.
+ */
+ WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online);
WARN_ON_ONCE(iocg->inuse > 1);
iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
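
The iocost fix above guards against shifting a 64-bit delay by 64 or more bits, which is undefined behavior in C; after 64 halvings the delay is zero anyway. A standalone sketch of the clamped decay:

#include <stdint.h>

#define DEMO_BITS_PER_LONG 64

static uint64_t demo_decayed_delay(uint64_t delay, uint64_t elapsed_usec)
{
	uint64_t shift = elapsed_usec / 1000000;	/* halve once per second */

	if (!delay || shift >= DEMO_BITS_PER_LONG)
		return 0;				/* fully decayed */
	return delay >> shift;
}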
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2a06fd33039d..4e3483a16b75 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -726,7 +726,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
* which can be mixed are set in each bio and mark @rq as mixed
* merged.
*/
-void blk_rq_set_mixed_merge(struct request *rq)
+static void blk_rq_set_mixed_merge(struct request *rq)
{
blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
struct bio *bio;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 555ada922cf0..32afb87efbd0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -770,16 +770,11 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
/*
* Partial zone append completions cannot be supported as the
* BIO fragments may end up not being written sequentially.
- * For such case, force the completed nbytes to be equal to
- * the BIO size so that bio_advance() sets the BIO remaining
- * size to 0 and we end up calling bio_endio() before returning.
*/
- if (bio->bi_iter.bi_size != nbytes) {
+ if (bio->bi_iter.bi_size != nbytes)
bio->bi_status = BLK_STS_IOERR;
- nbytes = bio->bi_iter.bi_size;
- } else {
+ else
bio->bi_iter.bi_sector = rq->__sector;
- }
}
bio_advance(bio, nbytes);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3c7d8d638ab5..d2731843f2fc 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -146,8 +146,7 @@ static int blk_validate_limits(struct queue_limits *lim)
max_hw_sectors = min_not_zero(lim->max_hw_sectors,
lim->max_dev_sectors);
if (lim->max_user_sectors) {
- if (lim->max_user_sectors > max_hw_sectors ||
- lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+ if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else {
@@ -183,17 +182,13 @@ static int blk_validate_limits(struct queue_limits *lim)
return -EINVAL;
/*
- * Devices that require a virtual boundary do not support scatter/gather
- * I/O natively, but instead require a descriptor list entry for each
- * page (which might not be identical to the Linux PAGE_SIZE). Because
- * of that they are not limited by our notion of "segment size".
+ * A stacking device may have both a virtual boundary and a max
+ * segment size limit, so allow the combination for now. Long term,
+ * the two might need to move out of the stacking limits, since we
+ * have immutable bvecs and lower-layer bio splitting is supposed to
+ * handle both correctly.
*/
- if (lim->virt_boundary_mask) {
- if (WARN_ON_ONCE(lim->max_segment_size &&
- lim->max_segment_size != UINT_MAX))
- return -EINVAL;
- lim->max_segment_size = UINT_MAX;
- } else {
+ if (!lim->virt_boundary_mask) {
/*
* The maximum segment size has an odd historic 64k default that
* drivers probably should override. Just like the I/O size we
diff --git a/block/blk.h b/block/blk.h
index 5cac4e29ae17..d9f584984bc4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -339,7 +339,6 @@ int ll_back_merge_fn(struct request *req, struct bio *bio,
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
-void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
diff --git a/block/ioctl.c b/block/ioctl.c
index 0c76137adcaa..f505f9c341eb 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -96,7 +96,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
unsigned long arg)
{
uint64_t range[2];
- uint64_t start, len;
+ uint64_t start, len, end;
struct inode *inode = bdev->bd_inode;
int err;
@@ -117,7 +117,8 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
if (len & 511)
return -EINVAL;
- if (start + len > bdev_nr_bytes(bdev))
+ if (check_add_overflow(start, len, &end) ||
+ end > bdev_nr_bytes(bdev))
return -EINVAL;
filemap_invalidate_lock(inode->i_mapping);
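
check_add_overflow() in the hunk above is the kernel's wrapper around the compiler's checked-arithmetic builtin, closing the wraparound hole where a huge start + len passed the old comparison. A standalone sketch using the underlying GCC/Clang builtin directly:

#include <stdbool.h>
#include <stdint.h>

static bool demo_discard_range_ok(uint64_t start, uint64_t len,
				  uint64_t dev_bytes)
{
	uint64_t end;

	if (__builtin_add_overflow(start, len, &end))
		return false;		/* start + len wrapped past 2^64 */
	return end <= dev_bytes;
}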
@@ -562,7 +563,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
return -EACCES;
if (bdev_is_partition(bdev))
return -EINVAL;
- return disk_scan_partitions(bdev->bd_disk, mode);
+ return disk_scan_partitions(bdev->bd_disk,
+ mode | BLK_OPEN_STRICT_SCAN);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 05402ef8964e..8aecbe4637f3 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -75,6 +75,9 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
oid = look_up_OID(value, vlen);
switch (oid) {
+ case OID_sha1:
+ ctx->digest_algo = "sha1";
+ break;
case OID_sha256:
ctx->digest_algo = "sha256";
break;
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 5b08c50722d0..231ad7b3789d 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -227,6 +227,9 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
struct pkcs7_parse_context *ctx = context;
switch (ctx->last_oid) {
+ case OID_sha1:
+ ctx->sinfo->sig->hash_algo = "sha1";
+ break;
case OID_sha256:
ctx->sinfo->sig->hash_algo = "sha256";
break;
@@ -278,6 +281,7 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
ctx->sinfo->sig->pkey_algo = "rsa";
ctx->sinfo->sig->encoding = "pkcs1";
break;
+ case OID_id_ecdsa_with_sha1:
case OID_id_ecdsa_with_sha224:
case OID_id_ecdsa_with_sha256:
case OID_id_ecdsa_with_sha384:
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index e5f22691febd..e314fd57e6f8 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -115,7 +115,8 @@ software_key_determine_akcipher(const struct public_key *pkey,
*/
if (!hash_algo)
return -EINVAL;
- if (strcmp(hash_algo, "sha224") != 0 &&
+ if (strcmp(hash_algo, "sha1") != 0 &&
+ strcmp(hash_algo, "sha224") != 0 &&
strcmp(hash_algo, "sha256") != 0 &&
strcmp(hash_algo, "sha384") != 0 &&
strcmp(hash_algo, "sha512") != 0 &&
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 398983be77e8..2deff81f8af5 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(decrypt_blob);
* Sign the specified data blob using the private key specified by params->key.
* The signature is wrapped in an encoding if params->encoding is specified
* (eg. "pkcs1"). If the encoding needs to know the digest type, this can be
- * passed through params->hash_algo (eg. "sha512").
+ * passed through params->hash_algo (eg. "sha1").
*
* Returns the length of the data placed in the signature buffer or an error.
*/
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 487204d39426..bb0bffa271b5 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -198,6 +198,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
default:
return -ENOPKG; /* Unsupported combination */
+ case OID_sha1WithRSAEncryption:
+ ctx->cert->sig->hash_algo = "sha1";
+ goto rsa_pkcs1;
+
case OID_sha256WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha256";
goto rsa_pkcs1;
@@ -214,6 +218,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
ctx->cert->sig->hash_algo = "sha224";
goto rsa_pkcs1;
+ case OID_id_ecdsa_with_sha1:
+ ctx->cert->sig->hash_algo = "sha1";
+ goto ecdsa;
+
case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:
ctx->cert->sig->hash_algo = "sha3-256";
goto rsa_pkcs1;
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 986f331a5fc2..12e1c892f366 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -653,6 +653,30 @@ static const struct akcipher_testvec rsa_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
{
.key =
+ "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
+ "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
+ "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
+ "\x98",
+ .key_len = 49,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x01",
+ .param_len = 21,
+ .m =
+ "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
+ "\x63\x85\xe7\x82",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91"
+ "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10"
+ "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86"
+ "\x80\x6f\xa5\x79\x77\xda\xd0",
+ .c_size = 55,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
"\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
"\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
"\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
@@ -756,6 +780,32 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
{
.key =
+ "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
+ "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
+ "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
+ "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
+ "\xaf",
+ .key_len = 65,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x07",
+ .param_len = 21,
+ .m =
+ "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
+ "\x0b\xde\x6a\x42",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7"
+ "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a"
+ "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d"
+ "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7"
+ "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad",
+ .c_size = 72,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
"\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
"\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
"\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
@@ -866,6 +916,36 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
{
+ .key = /* secp384r1(sha1) */
+ "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
+ "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f"
+ "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42"
+ "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e"
+ "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e"
+ "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
+ "\xf1",
+ .key_len = 97,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x22",
+ .param_len = 18,
+ .m =
+ "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
+ "\x3a\x69\xc1\x93",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07"
+ "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1"
+ "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e"
+ "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0"
+ "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88"
+ "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26"
+ "\x79\x12\x2a\xb7\xc5\x15\x92\xc5",
+ .c_size = 104,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
.key = /* secp384r1(sha224) */
"\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
"\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18"
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index d09d29775b3f..e07e447d08d1 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -3,6 +3,8 @@
* Copyright (C) 2020-2023 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 39f6d1b98fd6..51d3f1a55d02 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/firmware.h>
@@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
return 0;
}
-static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
-{
- int ret;
-
- ret = ivpu_rpm_get_if_active(vdev);
- if (ret < 0)
- return ret;
-
- *clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
-
- if (ret)
- ivpu_rpm_put(vdev);
-
- return 0;
-}
-
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- ret = ivpu_get_core_clock_rate(vdev, &args->value);
+ args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -387,12 +371,15 @@ int ivpu_shutdown(struct ivpu_device *vdev)
{
int ret;
- ivpu_prepare_for_reset(vdev);
+ /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+ pci_save_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_down(vdev);
if (ret)
ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
return ret;
}
@@ -530,7 +517,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
- xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+ xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
@@ -560,11 +547,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
/* Power up early so the rest of init code can access VPU registers */
ret = ivpu_hw_power_up(vdev);
if (ret)
- goto err_power_down;
+ goto err_shutdown;
ret = ivpu_mmu_global_context_init(vdev);
if (ret)
- goto err_power_down;
+ goto err_shutdown;
ret = ivpu_mmu_init(vdev);
if (ret)
@@ -601,10 +588,8 @@ err_mmu_rctx_fini:
ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
ivpu_mmu_global_context_fini(vdev);
-err_power_down:
- ivpu_hw_power_down(vdev);
- if (IVPU_WA(d3hot_after_power_off))
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+err_shutdown:
+ ivpu_shutdown(vdev);
err_xa_destroy:
xa_destroy(&vdev->db_xa);
xa_destroy(&vdev->submitted_jobs_xa);
@@ -628,9 +613,8 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_pm_disable(vdev);
+ ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
- if (IVPU_WA(d3hot_after_power_off))
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
ivpu_jobs_abort_all(vdev);
ivpu_job_done_consumer_fini(vdev);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 7be0500d9bb8..bb4374d0eaec 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@@ -90,7 +90,6 @@
struct ivpu_wa_table {
bool punit_disabled;
bool clear_runtime_mem;
- bool d3hot_after_power_off;
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
bool disable_d0i3_msg;
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index b2909168a0a6..094c659d2800 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -21,6 +21,7 @@ struct ivpu_hw_ops {
u32 (*profiling_freq_get)(struct ivpu_device *vdev);
void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+ u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
return vdev->hw->ops->reg_pll_freq_get(vdev);
};
+static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return vdev->hw->ops->ratio_to_freq(vdev, ratio);
+}
+
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_telemetry_offset_get(vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 9a0c9498baba..bd25e2d9fb0f 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
@@ -75,7 +75,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
vdev->wa.punit_disabled = false;
vdev->wa.clear_runtime_mem = false;
- vdev->wa.d3hot_after_power_off = true;
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
@@ -86,7 +85,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
- IVPU_PRINT_WA(d3hot_after_power_off);
IVPU_PRINT_WA(interrupt_clear_with_0);
}
@@ -805,12 +803,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}
-static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
- if ((config & 0xff) == PLL_RATIO_4_3)
+ if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
@@ -829,7 +827,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
- return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+ return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
}
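
ivpu_hw_37xx_ratio_to_freq() now reads the PLL config from vdev instead of taking it as a parameter, which lets it back the new ratio_to_freq op. A worked sketch of the math; the 50 MHz reference clock here is an assumption for illustration:

#include <stdint.h>

#define DEMO_PLL_REF_CLK_FREQ (50u * 1000000u)	/* assumed reference clock */

static uint32_t demo_ratio_to_freq(uint32_t ratio, int config_is_4_3)
{
	uint32_t pll_clock = DEMO_PLL_REF_CLK_FREQ * ratio;

	/* cpu_clock = pll * 2/4 on 4/3-ratio parts, else pll * 2/5 */
	return config_is_4_3 ? pll_clock * 2 / 4 : pll_clock * 2 / 5;
}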
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@@ -1052,6 +1050,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index e4eddbf5d11c..b0b88d4c8926 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
+static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return PLL_RATIO_TO_FREQ(ratio);
+}
+
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 04ac4b9840fb..56ff067f63e2 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/genalloc.h>
@@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
spin_lock_init(&ipc->cons_lock);
INIT_LIST_HEAD(&ipc->cons_list);
INIT_LIST_HEAD(&ipc->cb_msg_list);
- drmm_mutex_init(&vdev->drm, &ipc->lock);
+ ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
+ goto err_free_rx;
+ }
ivpu_ipc_reset(vdev);
return 0;
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 91bd640655ab..2e46b322c450 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -278,7 +278,7 @@ static const char *ivpu_mmu_event_to_str(u32 cmd)
case IVPU_MMU_EVT_F_VMS_FETCH:
return "Fetch of VMS caused external abort";
default:
- return "Unknown CMDQ command";
+ return "Unknown event";
}
}
@@ -286,15 +286,15 @@ static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
switch (err) {
case IVPU_MMU_CERROR_NONE:
- return "No CMDQ Error";
+ return "No error";
case IVPU_MMU_CERROR_ILL:
return "Illegal command";
case IVPU_MMU_CERROR_ABT:
- return "External abort on CMDQ read";
+ return "External abort on command queue read";
case IVPU_MMU_CERROR_ATC_INV_SYNC:
return "Sync failed to complete ATS invalidation";
default:
- return "Unknown CMDQ Error";
+ return "Unknown error";
}
}
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 7cce1c928a7f..4f5ea466731f 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/highmem.h>
@@ -58,14 +58,11 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
- /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
- pci_save_state(to_pci_dev(vdev->drm.dev));
+ ivpu_prepare_for_reset(vdev);
ret = ivpu_shutdown(vdev);
if (ret)
- ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
-
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+ ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret);
return ret;
}
@@ -74,10 +71,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+retry:
pci_restore_state(to_pci_dev(vdev->drm.dev));
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
-retry:
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
@@ -100,6 +97,7 @@ err_mmu_disable:
ivpu_mmu_disable(vdev);
err_power_down:
ivpu_hw_power_down(vdev);
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
if (!ivpu_fw_is_cold_boot(vdev)) {
ivpu_pm_prepare_cold_boot(vdev);
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 3f7f6dfde7f2..35e883515629 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -10,4 +10,7 @@ qaic-y := \
qaic_control.o \
qaic_data.o \
qaic_drv.o \
- qaic_timesync.o
+ qaic_timesync.o \
+ sahara.o
+
+qaic-$(CONFIG_DEBUG_FS) += qaic_debugfs.o
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 9256653b3036..02561b6cecc6 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -153,6 +153,14 @@ struct qaic_device {
struct mhi_device *qts_ch;
/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
struct workqueue_struct *qts_wq;
+ /* Head of list of pages allocated by MHI bootlog device */
+ struct list_head bootlog;
+ /* MHI bootlog channel device */
+ struct mhi_device *bootlog_ch;
+ /* Work queue for tasks related to MHI bootlog device */
+ struct workqueue_struct *bootlog_wq;
+ /* Synchronizes access of pages in MHI bootlog device */
+ struct mutex bootlog_mutex;
};
struct qaic_drm_device {
@@ -280,6 +288,7 @@ int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
+void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail);
void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 2459fe4a3f95..e86e71c1cdd8 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -1981,3 +1981,12 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id)
dbc->in_use = false;
wake_up(&dbc->dbc_release);
}
+
+void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail)
+{
+ if (!dbc || !head || !tail)
+ return;
+
+ *head = readl(dbc->dbc_base + REQHP_OFF);
+ *tail = readl(dbc->dbc_base + REQTP_OFF);
+}
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
new file mode 100644
index 000000000000..20b653d99e52
--- /dev/null
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "qaic.h"
+#include "qaic_debugfs.h"
+
+#define BOOTLOG_POOL_SIZE 16
+#define BOOTLOG_MSG_SIZE 512
+#define QAIC_DBC_DIR_NAME 9
+
+struct bootlog_msg {
+ /* Buffer for bootlog messages */
+ char str[BOOTLOG_MSG_SIZE];
+ /* Root struct of device, used to access device resources */
+ struct qaic_device *qdev;
+ /* Work struct to schedule work coming on QAIC_LOGGING channel */
+ struct work_struct work;
+};
+
+struct bootlog_page {
+ /* Node in list of bootlog pages maintained by root device struct */
+ struct list_head node;
+ /* Total size of the buffer that holds the bootlogs. It is PAGE_SIZE */
+ unsigned int size;
+ /* Offset for the next bootlog */
+ unsigned int offset;
+};
+
+static int bootlog_show(struct seq_file *s, void *unused)
+{
+ struct bootlog_page *page;
+ struct qaic_device *qdev;
+ void *page_end;
+ void *log;
+
+ qdev = s->private;
+ mutex_lock(&qdev->bootlog_mutex);
+ list_for_each_entry(page, &qdev->bootlog, node) {
+ log = page + 1;
+ page_end = (void *)page + page->offset;
+ while (log < page_end) {
+ seq_printf(s, "%s", (char *)log);
+ log += strlen(log) + 1;
+ }
+ }
+ mutex_unlock(&qdev->bootlog_mutex);
+
+ return 0;
+}
+
+static int bootlog_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bootlog_show, inode->i_private);
+}
+
+static const struct file_operations bootlog_fops = {
+ .owner = THIS_MODULE,
+ .open = bootlog_fops_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int read_dbc_fifo_size(struct seq_file *s, void *unused)
+{
+ struct dma_bridge_chan *dbc = s->private;
+
+ seq_printf(s, "%u\n", dbc->nelem);
+ return 0;
+}
+
+static int fifo_size_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, read_dbc_fifo_size, inode->i_private);
+}
+
+static const struct file_operations fifo_size_fops = {
+ .owner = THIS_MODULE,
+ .open = fifo_size_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int read_dbc_queued(struct seq_file *s, void *unused)
+{
+ struct dma_bridge_chan *dbc = s->private;
+ u32 tail = 0, head = 0;
+
+ qaic_data_get_fifo_info(dbc, &head, &tail);
+
+ if (head == U32_MAX || tail == U32_MAX)
+ seq_printf(s, "%u\n", 0);
+ else if (head > tail)
+ seq_printf(s, "%u\n", dbc->nelem - head + tail);
+ else
+ seq_printf(s, "%u\n", tail - head);
+
+ return 0;
+}
+
+static int queued_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, read_dbc_queued, inode->i_private);
+}
+
+static const struct file_operations queued_fops = {
+ .owner = THIS_MODULE,
+ .open = queued_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void qaic_debugfs_init(struct qaic_drm_device *qddev)
+{
+ struct qaic_device *qdev = qddev->qdev;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_dir;
+ char name[QAIC_DBC_DIR_NAME];
+ u32 i;
+
+ debugfs_root = to_drm(qddev)->debugfs_root;
+
+ debugfs_create_file("bootlog", 0400, debugfs_root, qdev, &bootlog_fops);
+ /*
+ * 256 dbcs per device is likely the max we will ever see, and it gives
+ * static checking a reasonable range to work with.
+ */
+ for (i = 0; i < qdev->num_dbc && i < 256; ++i) {
+ snprintf(name, QAIC_DBC_DIR_NAME, "dbc%03u", i);
+ debugfs_dir = debugfs_create_dir(name, debugfs_root);
+ debugfs_create_file("fifo_size", 0400, debugfs_dir, &qdev->dbc[i], &fifo_size_fops);
+ debugfs_create_file("queued", 0400, debugfs_dir, &qdev->dbc[i], &queued_fops);
+ }
+}
+
+static struct bootlog_page *alloc_bootlog_page(struct qaic_device *qdev)
+{
+ struct bootlog_page *page;
+
+ page = (struct bootlog_page *)devm_get_free_pages(&qdev->pdev->dev, GFP_KERNEL, 0);
+ if (!page)
+ return page;
+
+ page->size = PAGE_SIZE;
+ page->offset = sizeof(*page);
+ list_add_tail(&page->node, &qdev->bootlog);
+
+ return page;
+}
+
+static int reset_bootlog(struct qaic_device *qdev)
+{
+ struct bootlog_page *page;
+ struct bootlog_page *i;
+
+ mutex_lock(&qdev->bootlog_mutex);
+ list_for_each_entry_safe(page, i, &qdev->bootlog, node) {
+ list_del(&page->node);
+ devm_free_pages(&qdev->pdev->dev, (unsigned long)page);
+ }
+
+ page = alloc_bootlog_page(qdev);
+ mutex_unlock(&qdev->bootlog_mutex);
+ if (!page)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void *bootlog_get_space(struct qaic_device *qdev, unsigned int size)
+{
+ struct bootlog_page *page;
+
+ page = list_last_entry(&qdev->bootlog, struct bootlog_page, node);
+
+ if (size_add(size, sizeof(*page)) > page->size)
+ return NULL;
+
+ if (page->offset + size > page->size) {
+ page = alloc_bootlog_page(qdev);
+ if (!page)
+ return NULL;
+ }
+
+ return (void *)page + page->offset;
+}
+
+static void bootlog_commit(struct qaic_device *qdev, unsigned int size)
+{
+ struct bootlog_page *page;
+
+ page = list_last_entry(&qdev->bootlog, struct bootlog_page, node);
+
+ page->offset += size;
+}
+
+static void bootlog_log(struct work_struct *work)
+{
+ struct bootlog_msg *msg = container_of(work, struct bootlog_msg, work);
+ unsigned int len = strlen(msg->str) + 1;
+ struct qaic_device *qdev = msg->qdev;
+ void *log;
+
+ mutex_lock(&qdev->bootlog_mutex);
+ log = bootlog_get_space(qdev, len);
+ if (log) {
+ memcpy(log, msg, len);
+ bootlog_commit(qdev, len);
+ }
+ mutex_unlock(&qdev->bootlog_mutex);
+
+ if (mhi_queue_buf(qdev->bootlog_ch, DMA_FROM_DEVICE, msg, BOOTLOG_MSG_SIZE, MHI_EOT))
+ devm_kfree(&qdev->pdev->dev, msg);
+}
+
+static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct bootlog_msg *msg;
+ int i, ret;
+
+ qdev->bootlog_wq = alloc_ordered_workqueue("qaic_bootlog", 0);
+ if (!qdev->bootlog_wq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = reset_bootlog(qdev);
+ if (ret)
+ goto destroy_workqueue;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ goto destroy_workqueue;
+
+ for (i = 0; i < BOOTLOG_POOL_SIZE; i++) {
+ msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto mhi_unprepare;
+ }
+
+ msg->qdev = qdev;
+ INIT_WORK(&msg->work, bootlog_log);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, msg, BOOTLOG_MSG_SIZE, MHI_EOT);
+ if (ret)
+ goto mhi_unprepare;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->bootlog_ch = mhi_dev;
+ return 0;
+
+mhi_unprepare:
+ mhi_unprepare_from_transfer(mhi_dev);
+destroy_workqueue:
+ flush_workqueue(qdev->bootlog_wq);
+ destroy_workqueue(qdev->bootlog_wq);
+out:
+ return ret;
+}
+
+static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+
+ mhi_unprepare_from_transfer(qdev->bootlog_ch);
+ flush_workqueue(qdev->bootlog_wq);
+ destroy_workqueue(qdev->bootlog_wq);
+ qdev->bootlog_ch = NULL;
+}
+
+static void qaic_bootlog_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+}
+
+static void qaic_bootlog_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct bootlog_msg *msg = mhi_result->buf_addr;
+
+ if (mhi_result->transaction_status) {
+ devm_kfree(&qdev->pdev->dev, msg);
+ return;
+ }
+
+ /* Force a null at the end of the transferred string */
+ msg->str[mhi_result->bytes_xferd - 1] = 0;
+
+ queue_work(qdev->bootlog_wq, &msg->work);
+}
+
+static const struct mhi_device_id qaic_bootlog_mhi_match_table[] = {
+ { .chan = "QAIC_LOGGING", },
+ {},
+};
+
+static struct mhi_driver qaic_bootlog_mhi_driver = {
+ .id_table = qaic_bootlog_mhi_match_table,
+ .remove = qaic_bootlog_mhi_remove,
+ .probe = qaic_bootlog_mhi_probe,
+ .ul_xfer_cb = qaic_bootlog_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_bootlog_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_bootlog",
+ },
+};
+
+int qaic_bootlog_register(void)
+{
+ return mhi_driver_register(&qaic_bootlog_mhi_driver);
+}
+
+void qaic_bootlog_unregister(void)
+{
+ mhi_driver_unregister(&qaic_bootlog_mhi_driver);
+}
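The occupancy computation in read_dbc_queued() above is standard ring-buffer arithmetic. A minimal sketch of the same math, with illustrative values (nothing below is read from hardware; it assumes the usual kernel headers for u32/U32_MAX):

    static u32 fifo_occupancy(u32 nelem, u32 head, u32 tail)
    {
            if (head == U32_MAX || tail == U32_MAX)  /* pointers unreadable */
                    return 0;
            if (head > tail)                         /* tail wrapped past the end */
                    return nelem - head + tail;
            return tail - head;                      /* no wrap */
    }

    /* e.g. nelem = 128, head = 120, tail = 8 -> 128 - 120 + 8 = 16 queued */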
diff --git a/drivers/accel/qaic/qaic_debugfs.h b/drivers/accel/qaic/qaic_debugfs.h
new file mode 100644
index 000000000000..05e74f84cf9f
--- /dev/null
+++ b/drivers/accel/qaic/qaic_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#ifndef __QAIC_DEBUGFS_H__
+#define __QAIC_DEBUGFS_H__
+
+#include <drm/drm_file.h>
+
+#ifdef CONFIG_DEBUG_FS
+int qaic_bootlog_register(void);
+void qaic_bootlog_unregister(void);
+void qaic_debugfs_init(struct qaic_drm_device *qddev);
+#else
+static inline int qaic_bootlog_register(void) { return 0; }
+static inline void qaic_bootlog_unregister(void) {}
+static inline void qaic_debugfs_init(struct qaic_drm_device *qddev) {}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* __QAIC_DEBUGFS_H__ */
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index d1a632dbaec6..580b29ed1902 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -28,7 +28,9 @@
#include "mhi_controller.h"
#include "qaic.h"
+#include "qaic_debugfs.h"
#include "qaic_timesync.h"
+#include "sahara.h"
MODULE_IMPORT_NS(DMA_BUF);
@@ -229,8 +231,12 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
qddev->partition_id = partition_id;
ret = drm_dev_register(drm, 0);
- if (ret)
+ if (ret) {
pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
+ return ret;
+ }
+
+ qaic_debugfs_init(qddev);
return ret;
}
@@ -382,6 +388,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
if (ret)
return NULL;
+ ret = drmm_mutex_init(drm, &qdev->bootlog_mutex);
+ if (ret)
+ return NULL;
qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
if (IS_ERR(qdev->cntl_wq))
@@ -399,6 +408,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
qddev->qdev = qdev;
INIT_LIST_HEAD(&qdev->cntl_xfer_list);
+ INIT_LIST_HEAD(&qdev->bootlog);
INIT_LIST_HEAD(&qddev->users);
for (i = 0; i < qdev->num_dbc; ++i) {
@@ -635,12 +645,24 @@ static int __init qaic_init(void)
goto free_pci;
}
+ ret = sahara_register();
+ if (ret) {
+ pr_debug("qaic: sahara_register failed %d\n", ret);
+ goto free_mhi;
+ }
+
ret = qaic_timesync_init();
if (ret)
pr_debug("qaic: qaic_timesync_init failed %d\n", ret);
+ ret = qaic_bootlog_register();
+ if (ret)
+ pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);
+
return 0;
+free_mhi:
+ mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
pci_unregister_driver(&qaic_pci_driver);
return ret;
@@ -664,7 +686,9 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_bootlog_unregister();
qaic_timesync_deinit();
+ sahara_unregister();
mhi_driver_unregister(&qaic_mhi_driver);
pci_unregister_driver(&qaic_pci_driver);
}
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
new file mode 100644
index 000000000000..bf94bbab6be5
--- /dev/null
+++ b/drivers/accel/qaic/sahara.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/firmware.h>
+#include <linux/limits.h>
+#include <linux/mhi.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "sahara.h"
+
+#define SAHARA_HELLO_CMD 0x1 /* Min protocol version 1.0 */
+#define SAHARA_HELLO_RESP_CMD 0x2 /* Min protocol version 1.0 */
+#define SAHARA_READ_DATA_CMD 0x3 /* Min protocol version 1.0 */
+#define SAHARA_END_OF_IMAGE_CMD 0x4 /* Min protocol version 1.0 */
+#define SAHARA_DONE_CMD 0x5 /* Min protocol version 1.0 */
+#define SAHARA_DONE_RESP_CMD 0x6 /* Min protocol version 1.0 */
+#define SAHARA_RESET_CMD 0x7 /* Min protocol version 1.0 */
+#define SAHARA_RESET_RESP_CMD 0x8 /* Min protocol version 1.0 */
+#define SAHARA_MEM_DEBUG_CMD 0x9 /* Min protocol version 2.0 */
+#define SAHARA_MEM_READ_CMD 0xa /* Min protocol version 2.0 */
+#define SAHARA_CMD_READY_CMD 0xb /* Min protocol version 2.1 */
+#define SAHARA_SWITCH_MODE_CMD 0xc /* Min protocol version 2.1 */
+#define SAHARA_EXECUTE_CMD 0xd /* Min protocol version 2.1 */
+#define SAHARA_EXECUTE_RESP_CMD 0xe /* Min protocol version 2.1 */
+#define SAHARA_EXECUTE_DATA_CMD 0xf /* Min protocol version 2.1 */
+#define SAHARA_MEM_DEBUG64_CMD 0x10 /* Min protocol version 2.5 */
+#define SAHARA_MEM_READ64_CMD 0x11 /* Min protocol version 2.5 */
+#define SAHARA_READ_DATA64_CMD 0x12 /* Min protocol version 2.8 */
+#define SAHARA_RESET_STATE_CMD 0x13 /* Min protocol version 2.9 */
+#define SAHARA_WRITE_DATA_CMD 0x14 /* Min protocol version 3.0 */
+
+#define SAHARA_PACKET_MAX_SIZE 0xffffU /* MHI_MAX_MTU */
+#define SAHARA_TRANSFER_MAX_SIZE 0x80000
+#define SAHARA_NUM_TX_BUF DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
+ SAHARA_PACKET_MAX_SIZE)
+#define SAHARA_IMAGE_ID_NONE U32_MAX
+
+#define SAHARA_VERSION 2
+#define SAHARA_SUCCESS 0
+
+#define SAHARA_MODE_IMAGE_TX_PENDING 0x0
+#define SAHARA_MODE_IMAGE_TX_COMPLETE 0x1
+#define SAHARA_MODE_MEMORY_DEBUG 0x2
+#define SAHARA_MODE_COMMAND 0x3
+
+#define SAHARA_HELLO_LENGTH 0x30
+#define SAHARA_READ_DATA_LENGTH 0x14
+#define SAHARA_END_OF_IMAGE_LENGTH 0x10
+#define SAHARA_DONE_LENGTH 0x8
+#define SAHARA_RESET_LENGTH 0x8
+
+struct sahara_packet {
+ __le32 cmd;
+ __le32 length;
+
+ union {
+ struct {
+ __le32 version;
+ __le32 version_compat;
+ __le32 max_length;
+ __le32 mode;
+ } hello;
+ struct {
+ __le32 version;
+ __le32 version_compat;
+ __le32 status;
+ __le32 mode;
+ } hello_resp;
+ struct {
+ __le32 image;
+ __le32 offset;
+ __le32 length;
+ } read_data;
+ struct {
+ __le32 image;
+ __le32 status;
+ } end_of_image;
+ };
+};
+
+struct sahara_context {
+ struct sahara_packet *tx[SAHARA_NUM_TX_BUF];
+ struct sahara_packet *rx;
+ struct work_struct work;
+ struct mhi_device *mhi_dev;
+ const char **image_table;
+ u32 table_size;
+ u32 active_image_id;
+ const struct firmware *firmware;
+};
+
+static const char *aic100_image_table[] = {
+ [1] = "qcom/aic100/fw1.bin",
+ [2] = "qcom/aic100/fw2.bin",
+ [4] = "qcom/aic100/fw4.bin",
+ [5] = "qcom/aic100/fw5.bin",
+ [6] = "qcom/aic100/fw6.bin",
+ [8] = "qcom/aic100/fw8.bin",
+ [9] = "qcom/aic100/fw9.bin",
+ [10] = "qcom/aic100/fw10.bin",
+};
+
+static int sahara_find_image(struct sahara_context *context, u32 image_id)
+{
+ int ret;
+
+ if (image_id == context->active_image_id)
+ return 0;
+
+ if (context->active_image_id != SAHARA_IMAGE_ID_NONE) {
+ dev_err(&context->mhi_dev->dev, "image id %d is not valid as %d is active\n",
+ image_id, context->active_image_id);
+ return -EINVAL;
+ }
+
+ if (image_id >= context->table_size || !context->image_table[image_id]) {
+ dev_err(&context->mhi_dev->dev, "request for unknown image: %d\n", image_id);
+ return -EINVAL;
+ }
+
+ /*
+ * This image might be optional. The device may continue without it.
+ * Only the device knows. Suppress error messages that could suggest
+ * a problem when we were actually able to continue.
+ */
+ ret = firmware_request_nowarn(&context->firmware,
+ context->image_table[image_id],
+ &context->mhi_dev->dev);
+ if (ret) {
+ dev_dbg(&context->mhi_dev->dev, "request for image id %d / file %s failed %d\n",
+ image_id, context->image_table[image_id], ret);
+ return ret;
+ }
+
+ context->active_image_id = image_id;
+
+ return 0;
+}
+
+static void sahara_release_image(struct sahara_context *context)
+{
+ if (context->active_image_id != SAHARA_IMAGE_ID_NONE)
+ release_firmware(context->firmware);
+ context->active_image_id = SAHARA_IMAGE_ID_NONE;
+}
+
+static void sahara_send_reset(struct sahara_context *context)
+{
+ int ret;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_RESET_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send reset response %d\n", ret);
+}
+
+static void sahara_hello(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "HELLO cmd received. length:%d version:%d version_compat:%d max_length:%d mode:%d\n",
+ le32_to_cpu(context->rx->length),
+ le32_to_cpu(context->rx->hello.version),
+ le32_to_cpu(context->rx->hello.version_compat),
+ le32_to_cpu(context->rx->hello.max_length),
+ le32_to_cpu(context->rx->hello.mode));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_HELLO_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed hello packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+ if (le32_to_cpu(context->rx->hello.version) != SAHARA_VERSION) {
+ dev_err(&context->mhi_dev->dev, "Unsupported hello packet - version %d\n",
+ le32_to_cpu(context->rx->hello.version));
+ return;
+ }
+
+ if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE) {
+ dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
+ le32_to_cpu(context->rx->hello.mode));
+ return;
+ }
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_HELLO_RESP_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_HELLO_LENGTH);
+ context->tx[0]->hello_resp.version = cpu_to_le32(SAHARA_VERSION);
+ context->tx[0]->hello_resp.version_compat = cpu_to_le32(SAHARA_VERSION);
+ context->tx[0]->hello_resp.status = cpu_to_le32(SAHARA_SUCCESS);
+ context->tx[0]->hello_resp.mode = context->rx->hello_resp.mode;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_HELLO_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
+}
+
+static void sahara_read_data(struct sahara_context *context)
+{
+ u32 image_id, data_offset, data_len, pkt_data_len;
+ int ret;
+ int i;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "READ_DATA cmd received. length:%d image:%d offset:%d data_length:%d\n",
+ le32_to_cpu(context->rx->length),
+ le32_to_cpu(context->rx->read_data.image),
+ le32_to_cpu(context->rx->read_data.offset),
+ le32_to_cpu(context->rx->read_data.length));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_READ_DATA_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ image_id = le32_to_cpu(context->rx->read_data.image);
+ data_offset = le32_to_cpu(context->rx->read_data.offset);
+ data_len = le32_to_cpu(context->rx->read_data.length);
+
+ ret = sahara_find_image(context, image_id);
+ if (ret) {
+ sahara_send_reset(context);
+ return;
+ }
+
+ /*
+ * Image is released when the device is done with it via
+ * SAHARA_END_OF_IMAGE_CMD. sahara_send_reset() will either cause the
+ * device to retry the operation with a modification, or decide to be
+ * done with the image and trigger SAHARA_END_OF_IMAGE_CMD.
+ * release_image() is called from SAHARA_END_OF_IMAGE_CMD processing
+ * and is not needed here on error.
+ */
+
+ if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
+ data_len, SAHARA_TRANSFER_MAX_SIZE);
+ sahara_send_reset(context);
+ return;
+ }
+
+ if (data_offset >= context->firmware->size) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d exceeds file size %zu\n",
+ data_offset, context->firmware->size);
+ sahara_send_reset(context);
+ return;
+ }
+
+ if (size_add(data_offset, data_len) > context->firmware->size) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d and length %d exceeds file size %zu\n",
+ data_offset, data_len, context->firmware->size);
+ sahara_send_reset(context);
+ return;
+ }
+
+ for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
+ pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);
+
+ memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);
+
+ data_offset += pkt_data_len;
+ data_len -= pkt_data_len;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
+ context->tx[i], pkt_data_len,
+ !data_len ? MHI_EOT : MHI_CHAIN);
+ if (ret) {
+ dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
+ ret);
+ return;
+ }
+ }
+}
+
+static void sahara_end_of_image(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "END_OF_IMAGE cmd received. length:%d image:%d status:%d\n",
+ le32_to_cpu(context->rx->length),
+ le32_to_cpu(context->rx->end_of_image.image),
+ le32_to_cpu(context->rx->end_of_image.status));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_END_OF_IMAGE_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ if (context->active_image_id != SAHARA_IMAGE_ID_NONE &&
+ le32_to_cpu(context->rx->end_of_image.image) != context->active_image_id) {
+ dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - image %d is not the active image\n",
+ le32_to_cpu(context->rx->end_of_image.image));
+ return;
+ }
+
+ sahara_release_image(context);
+
+ if (le32_to_cpu(context->rx->end_of_image.status))
+ return;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_DONE_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_DONE_LENGTH);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_DONE_LENGTH, MHI_EOT);
+ if (ret)
+ dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
+}
+
+static void sahara_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, work);
+ int ret;
+
+ switch (le32_to_cpu(context->rx->cmd)) {
+ case SAHARA_HELLO_CMD:
+ sahara_hello(context);
+ break;
+ case SAHARA_READ_DATA_CMD:
+ sahara_read_data(context);
+ break;
+ case SAHARA_END_OF_IMAGE_CMD:
+ sahara_end_of_image(context);
+ break;
+ case SAHARA_DONE_RESP_CMD:
+ /* Intentionally do nothing, as we don't need to exit an app */
+ break;
+ default:
+ dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
+ le32_to_cpu(context->rx->cmd));
+ break;
+ }
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
+ SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
+}
+
+static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct sahara_context *context;
+ int ret;
+ int i;
+
+ context = devm_kzalloc(&mhi_dev->dev, sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ context->rx = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ if (!context->rx)
+ return -ENOMEM;
+
+ /*
+ * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
+ * will request for READ_DATA. This is larger than
+ * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
+ * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
+ * READ_DATA, it requires a transfer of the exact size requested. We
+ * can use MHI_CHAIN to link multiple buffers into a single transfer
+ * but the remote side will not consume the buffers until it sees an
+ * EOT, thus we need to allocate enough buffers to put in the tx fifo
+ * to cover an entire READ_DATA request of the max size.
+ */
+ for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
+ context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ if (!context->tx[i])
+ return -ENOMEM;
+ }
+
+ context->mhi_dev = mhi_dev;
+ INIT_WORK(&context->work, sahara_processing);
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ context->active_image_id = SAHARA_IMAGE_ID_NONE;
+ dev_set_drvdata(&mhi_dev->dev, context);
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, context->rx, SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sahara_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ cancel_work_sync(&context->work);
+ sahara_release_image(context);
+ mhi_unprepare_from_transfer(mhi_dev);
+}
+
+static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+}
+
+static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ if (!mhi_result->transaction_status)
+ schedule_work(&context->work);
+}
+
+static const struct mhi_device_id sahara_mhi_match_table[] = {
+ { .chan = "QAIC_SAHARA", },
+ {},
+};
+
+static struct mhi_driver sahara_mhi_driver = {
+ .id_table = sahara_mhi_match_table,
+ .remove = sahara_mhi_remove,
+ .probe = sahara_mhi_probe,
+ .ul_xfer_cb = sahara_mhi_ul_xfer_cb,
+ .dl_xfer_cb = sahara_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "sahara",
+ },
+};
+
+int sahara_register(void)
+{
+ return mhi_driver_register(&sahara_mhi_driver);
+}
+
+void sahara_unregister(void)
+{
+ mhi_driver_unregister(&sahara_mhi_driver);
+}
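The TX pool sizing above follows directly from the two size macros. A worked check (the static_assert is illustrative, not part of the driver):

    /* 0x80000 / 0xffff = 524288 / 65535 = 8 remainder 8, so DIV_ROUND_UP
     * yields 9: nine chained packets cover one maximal READ_DATA request.
     */
    static_assert(DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,
                               SAHARA_PACKET_MAX_SIZE) == 9);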
diff --git a/drivers/accel/qaic/sahara.h b/drivers/accel/qaic/sahara.h
new file mode 100644
index 000000000000..640208acc0d1
--- /dev/null
+++ b/drivers/accel/qaic/sahara.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#ifndef __SAHARA_H__
+#define __SAHARA_H__
+
+int sahara_register(void);
+void sahara_unregister(void);
+#endif /* __SAHARA_H__ */
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 1fbc9b921c4f..736c2eb8c0f3 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index b91155ea9c34..c9131259f717 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle,
ACPI_FREE(buffer.pointer);
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
-
+ status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could Not evaluate object %p\n",
+ obj_handle);
+ return (AE_OK);
+ }
/*
* Since this is a field unit, surround the output in braces
*/
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 66e7f529e92f..01faca3a238a 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -851,7 +851,7 @@ err_put_table:
return rc;
}
-static void __exit einj_remove(struct platform_device *pdev)
+static void einj_remove(struct platform_device *pdev)
{
struct apei_exec_context ctx;
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 4bfbe55553f4..a40b6f3946ef 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -170,8 +170,8 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset & \
- GENMASK(((reg)->bit_width), 0)))
+#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+ GENMASK(((reg)->bit_width) - 1, 0))
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -1002,14 +1002,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
*val = 0;
+ size = GET_BIT_WIDTH(reg);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
- &val_u32, width);
+ &val_u32, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
@@ -1018,17 +1018,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = val_u32;
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_read_ffh(cpu, reg, val);
else
return acpi_os_read_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
switch (size) {
case 8:
@@ -1044,8 +1049,13 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = readq_relaxed(vaddr);
break;
default:
- pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
return -EFAULT;
}
@@ -1063,12 +1073,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ size = GET_BIT_WIDTH(reg);
+
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
- (u32)val, width);
+ (u32)val, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to write SystemIO port %llx\n",
reg->address);
@@ -1076,17 +1087,22 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_write_ffh(cpu, reg, val);
else
return acpi_os_write_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
val = MASK_VAL(reg, val);
@@ -1105,8 +1121,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
writeq_relaxed(val, vaddr);
break;
default:
- pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
ret_val = -EFAULT;
break;
}
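The MASK_VAL fix above is easiest to see with concrete numbers; the field geometry below (bit_offset = 8, bit_width = 8) is illustrative, not taken from a real _CPC table:

    u64 raw = 0x12345678;                        /* hypothetical register read */
    u64 field = (raw >> 8) & GENMASK(8 - 1, 0);  /* shift, then mask: 0x56 */

    /* The old macro masked the offset rather than the result:
     * raw >> (8 & GENMASK(8, 0)) = 0x123456, leaking bits above bit_width.
     */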
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7c157bf92695..d1464324de95 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1843,7 +1843,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
if (dep->honor_dep)
adev->flags.honor_deps = 1;
- adev->dep_unmet++;
+ if (!dep->met)
+ adev->dep_unmet++;
}
}
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 302dce0b2b50..d67881b50bca 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -662,14 +662,15 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz,
{
int result;
- tz->thermal_zone = thermal_zone_device_register_with_trips("acpitz",
- trip_table,
- trip_count,
- tz,
- &acpi_thermal_zone_ops,
- NULL,
- passive_delay,
- tz->polling_frequency * 100);
+ if (trip_count)
+ tz->thermal_zone = thermal_zone_device_register_with_trips(
+ "acpitz", trip_table, trip_count, tz,
+ &acpi_thermal_zone_ops, NULL, passive_delay,
+ tz->polling_frequency * 100);
+ else
+ tz->thermal_zone = thermal_tripless_zone_device_register(
+ "acpitz", tz, &acpi_thermal_zone_ops, NULL);
+
if (IS_ERR(tz->thermal_zone))
return PTR_ERR(tz->thermal_zone);
@@ -901,11 +902,8 @@ static int acpi_thermal_add(struct acpi_device *device)
trip++;
}
- if (trip == trip_table) {
+ if (trip == trip_table)
pr_warn(FW_BUG "No valid trip points!\n");
- result = -ENODEV;
- goto free_memory;
- }
result = acpi_thermal_register_thermal_zone(tz, trip_table,
trip - trip_table,
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index cd84af23f7ea..dd0b40b9bbe8 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -492,16 +492,14 @@ static int lps0_device_attach(struct acpi_device *adev,
unsigned int func_mask;
/*
- * Avoid evaluating the same _DSM function for two
- * different UUIDs and prioritize the MSFT one.
+ * Log a message if the _DSM function sets for two
+ * different UUIDs overlap.
*/
func_mask = lps0_dsm_func_mask & lps0_dsm_func_mask_microsoft;
- if (func_mask) {
+ if (func_mask)
acpi_handle_info(adev->handle,
"Duplicate LPS0 _DSM functions (mask: 0x%x)\n",
func_mask);
- lps0_dsm_func_mask &= ~func_mask;
- }
}
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bad28cf42010..dd6923d37931 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 562302e2e57c..6548f10e61d9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -666,6 +666,87 @@ static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+static char *ahci_mask_port_map;
+module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
+MODULE_PARM_DESC(mask_port_map,
+ "32-bits port map masks to ignore controllers ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static void ahci_apply_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv, char *mask_s)
+{
+ unsigned int mask;
+
+ if (kstrtouint(mask_s, 0, &mask)) {
+ dev_err(dev, "Invalid port map mask\n");
+ return;
+ }
+
+ hpriv->mask_port_map = mask;
+}
+
+static void ahci_get_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv)
+{
+ char *param, *end, *str, *mask_s;
+ char *name;
+
+ if (!strlen(ahci_mask_port_map))
+ return;
+
+ str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ if (!str)
+ return;
+
+ /* Handle single mask case */
+ if (!strchr(str, '=')) {
+ ahci_apply_port_map_mask(dev, hpriv, str);
+ goto free;
+ }
+
+ /*
+ * Mask list case: parse the parameter to apply the mask only if
+ * the device name matches.
+ */
+ param = str;
+ end = param + strlen(param);
+ while (param && param < end && *param) {
+ name = param;
+ param = strchr(name, '=');
+ if (!param)
+ break;
+
+ *param = '\0';
+ param++;
+ if (param >= end)
+ break;
+
+ if (strcmp(dev_name(dev), name) != 0) {
+ param = strchr(param, ',');
+ if (param)
+ param++;
+ continue;
+ }
+
+ mask_s = param;
+ param = strchr(mask_s, ',');
+ if (param) {
+ *param = '\0';
+ param++;
+ }
+
+ ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ }
+
+free:
+ kfree(str);
+}
+
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
@@ -688,6 +769,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
}
+ /* Handle port map masks passed as module parameter. */
+ if (ahci_mask_port_map)
+ ahci_get_port_map_mask(&pdev->dev, hpriv);
+
ahci_save_initial_config(&pdev->dev, hpriv);
}
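For reference, the formats accepted by the new parameter (the PCI addresses are examples only): ahci.mask_port_map=0x1 on the kernel command line applies one mask to every AHCI controller, while ahci.mask_port_map=0000:00:17.0=0x1,0000:01:00.0=0x3 applies per-device masks to the named controllers and leaves all others untouched.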
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index d4a626f87963..79a8b0aa37bf 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -30,7 +30,6 @@
#define ST_AHCI_OOBR_CIMAX_SHIFT 0
struct st_ahci_drv_data {
- struct platform_device *ahci;
struct reset_control *pwr;
struct reset_control *sw_rst;
struct reset_control *pwr_rst;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index be3412cdb22e..c449d60d9bb9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2539,7 +2539,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
bool cdl_enabled;
u64 val;
- if (ata_id_major_version(dev->id) < 12)
+ if (ata_id_major_version(dev->id) < 11)
goto not_supported;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b0d6e69c4a5b..214b935c2ced 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -712,8 +712,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
ehc->saved_ncq_enabled |= 1 << devno;
/* If we are resuming, wake up the device */
- if (ap->pflags & ATA_PFLAG_RESUMING)
+ if (ap->pflags & ATA_PFLAG_RESUMING) {
+ dev->flags |= ATA_DFLAG_RESUMING;
ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
+ }
}
}
@@ -3169,6 +3171,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
return 0;
err:
+ dev->flags &= ~ATA_DFLAG_RESUMING;
*r_failed_dev = dev;
return rc;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0a0f483124c3..e954976891a9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4730,6 +4730,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
+ bool do_resume;
int ret = 0;
mutex_lock(&ap->scsi_scan_mutex);
@@ -4744,25 +4745,34 @@ void ata_scsi_dev_rescan(struct work_struct *work)
* bail out.
*/
if (ap->pflags & ATA_PFLAG_SUSPENDED)
- goto unlock;
+ goto unlock_ap;
if (!sdev)
continue;
if (scsi_device_get(sdev))
continue;
+ do_resume = dev->flags & ATA_DFLAG_RESUMING;
+
spin_unlock_irqrestore(ap->lock, flags);
+ if (do_resume) {
+ ret = scsi_resume_device(sdev);
+ if (ret == -EWOULDBLOCK)
+ goto unlock_scan;
+ dev->flags &= ~ATA_DFLAG_RESUMING;
+ }
ret = scsi_rescan_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
if (ret)
- goto unlock;
+ goto unlock_ap;
}
}
-unlock:
+unlock_ap:
spin_unlock_irqrestore(ap->lock, flags);
+unlock_scan:
mutex_unlock(&ap->scsi_scan_mutex);
/* Reschedule with a delay if scsi_rescan_device() returned an error */
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 4ac854f6b057..88b2e9817f49 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1371,9 +1371,6 @@ static struct pci_driver pata_macio_pci_driver = {
.suspend = pata_macio_pci_suspend,
.resume = pata_macio_pci_resume,
#endif
- .driver = {
- .owner = THIS_MODULE,
- },
};
MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
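Dropping the explicit .driver.owner above is safe: pci_register_driver() is a macro around __pci_register_driver(), which already supplies THIS_MODULE (and KBUILD_MODNAME) on the driver's behalf, so the field was being set twice.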
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 400b22ee99c3..4c270999ba3c 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -200,7 +200,10 @@ int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge)
pclk = sg->sata0_pclk;
else
pclk = sg->sata1_pclk;
- clk_enable(pclk);
+ ret = clk_enable(pclk);
+ if (ret)
+ return ret;
+
msleep(10);
/* Do not keep clocking a bridge that is not online */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index e82786c63fbd..9bec0aee92e0 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -787,37 +787,6 @@ static const struct ata_port_info mv_port_info[] = {
},
};
-static const struct pci_device_id mv_pci_tbl[] = {
- { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
- { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
- /* RocketRAID 1720/174x have different identifiers */
- { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
-
- { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
- { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
- { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
-
- { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
-
- /* Adaptec 1430SA */
- { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
-
- /* Marvell 7042 support */
- { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
-
- /* Highpoint RocketRAID PCIe series */
- { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
- { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
-
- { } /* terminate list */
-};
-
static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata = mv5_phy_errata,
.enable_leds = mv5_enable_leds,
@@ -4303,6 +4272,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
+static const struct pci_device_id mv_pci_tbl[] = {
+ { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+ { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+ /* RocketRAID 1720/174x have different identifiers */
+ { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+
+ { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+ { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+ { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+
+ { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+
+ /* Adaptec 1430SA */
+ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+
+ /* Marvell 7042 support */
+ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
+ /* Highpoint RocketRAID PCIe series */
+ { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+ { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
+ { } /* terminate list */
+};
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
@@ -4315,6 +4314,7 @@ static struct pci_driver mv_pci_driver = {
#endif
};
+MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
/**
* mv_print_info - Dump key info to kernel log for perusal.
@@ -4487,7 +4487,6 @@ static void __exit mv_exit(void)
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index b51d7a9d0d90..a482741eb181 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -957,8 +957,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
offset -= (idx * window_size);
idx++;
- dist = ((long) (window_size - (offset + size))) >= 0 ? size :
- (long) (window_size - offset);
+ dist = min(size, window_size - offset);
memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
psource += dist;
@@ -1005,8 +1004,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_DIMM_WINDOW_CTLR);
offset -= (idx * window_size);
idx++;
- dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
- (long) (window_size - offset);
+ dist = min(size, window_size - offset);
memcpy_toio(dimm_mmio + offset / 4, psource, dist);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
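Both hunks above replace the same signed-arithmetic ternary with a direct clamp. An illustration with made-up numbers:

    u32 dist = min(size, window_size - offset);
    /* e.g. window_size = 0x20000, offset = 0x1ff00, size = 0x400:
     * only 0x100 bytes remain in this window, so dist = 0x100 and the
     * loop resumes the copy from the start of the next window.
     */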
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b93f3c5716ae..5f4e03336e68 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,7 @@ static bool fw_devlink_is_permissive(void);
static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
+static struct workqueue_struct *device_link_wq;
/**
* __fwnode_link_add - Create a link between two fwnode_handles.
@@ -533,12 +534,26 @@ static void devlink_dev_release(struct device *dev)
/*
* It may take a while to complete this work because of the SRCU
* synchronization in device_link_release_fn() and if the consumer or
- * supplier devices get deleted when it runs, so put it into the "long"
- * workqueue.
+ * supplier devices get deleted when it runs, so put it into the
+ * dedicated workqueue.
*/
- queue_work(system_long_wq, &link->rm_work);
+ queue_work(device_link_wq, &link->rm_work);
}
+/**
+ * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
+ */
+void device_link_wait_removal(void)
+{
+ /*
+ * devlink removal jobs are queued in the dedicated work queue.
+ * To be sure that all removal jobs are terminated, ensure that any
+ * scheduled work has run to completion.
+ */
+ flush_workqueue(device_link_wq);
+}
+EXPORT_SYMBOL_GPL(device_link_wait_removal);
+
static struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
@@ -4164,9 +4179,14 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
+ device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ if (!device_link_wq)
+ goto wq_err;
return 0;
+ wq_err:
+ kobject_put(sysfs_dev_char_kobj);
char_kobj_err:
kobject_put(sysfs_dev_block_kobj);
block_kobj_err:
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 7e2d1f0d903a..82aeb09b3d1b 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -305,6 +305,29 @@ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
}
/**
+ * dev_coredump_put - remove device coredump
+ * @dev: the struct device for the crashed device
+ *
+ * dev_coredump_put() removes the coredump for a given device from the
+ * file system and frees its associated data if one exists; otherwise it
+ * does nothing.
+ *
+ * It is useful for modules that do not want to keep a coredump
+ * available after they are unloaded.
+ */
+void dev_coredump_put(struct device *dev)
+{
+ struct device *existing;
+
+ existing = class_find_device(&devcd_class, NULL, dev,
+ devcd_match_failing);
+ if (existing) {
+ devcd_free(existing, NULL);
+ put_device(existing);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_coredump_put);
+
+/**
* dev_coredumpm - create device coredump with read/free methods
* @dev: the struct device for the crashed device
* @owner: the module that contains the read/free functions, use %THIS_MODULE
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 41edd6a430eb..55999a50ccc0 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -112,7 +112,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
unsigned long *entry, *lower, *upper;
unsigned long lower_index, lower_last;
unsigned long upper_index, upper_last;
- int ret;
+ int ret = 0;
lower = NULL;
upper = NULL;
@@ -145,7 +145,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
upper_index = max + 1;
upper_last = mas.last;
- upper = kmemdup(&entry[max + 1],
+ upper = kmemdup(&entry[max - mas.index + 1],
((mas.last - max) *
sizeof(unsigned long)),
map->alloc_flags);
@@ -244,7 +244,7 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
unsigned long lmin = min;
unsigned long lmax = max;
unsigned int r, v, sync_start;
- int ret;
+ int ret = 0;
bool sync_needed = false;
map->cache_bypass = true;
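The kmemdup() change in regcache_maple_drop() above is an indexing fix worth spelling out (register numbers here are hypothetical): a maple entry spanning registers mas.index..mas.last stores register r at entry[r - mas.index]. For a block covering 10..20 (mas.index = 10), dropping 12..15 (max = 15) leaves an upper remainder starting at register 16, i.e. entry[15 - 10 + 1] = entry[6]. The old entry[max + 1] = entry[16] indexed as though every block began at register 0, reading past the intended element.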
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 71c39bcd872c..ed33cf7192d2 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1965,10 +1965,10 @@ static int null_add_dev(struct nullb_device *dev)
out_ida_free:
ida_free(&nullb_indexes, nullb->index);
-out_cleanup_zone:
- null_free_zoned_dev(dev);
out_cleanup_disk:
put_disk(nullb->disk);
+out_cleanup_zone:
+ null_free_zoned_dev(dev);
out_cleanup_tags:
if (nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index ac8ebccd3507..812fd2a8f853 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -380,8 +380,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
break;
+ }
data->cd_info.cnt = 0;
/* It is supposed coredump can be done within 5 seconds */
@@ -407,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
- if (err < 0)
- kfree_skb(skb);
-
return err;
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index b40b32fa7f1c..216826c31ee3 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -15,6 +15,8 @@
#define VERSION "0.1"
+#define QCA_BDADDR_DEFAULT (&(bdaddr_t) {{ 0xad, 0x5a, 0x00, 0x00, 0x00, 0x00 }})
+
int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
enum qca_btsoc_type soc_type)
{
@@ -612,6 +614,38 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+static int qca_check_bdaddr(struct hci_dev *hdev)
+{
+ struct hci_rp_read_bd_addr *bda;
+ struct sk_buff *skb;
+ int err;
+
+ if (bacmp(&hdev->public_addr, BDADDR_ANY))
+ return 0;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to read device address (%d)", err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*bda)) {
+ bt_dev_err(hdev, "Device address length mismatch");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ bda = (struct hci_rp_read_bd_addr *)skb->data;
+ if (!bacmp(&bda->bdaddr, QCA_BDADDR_DEFAULT))
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
@@ -818,6 +852,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
break;
}
+ err = qca_check_bdaddr(hdev);
+ if (err)
+ return err;
+
bt_dev_info(hdev, "QCA setup on UART is completed");
return 0;
@@ -826,11 +864,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
+ bdaddr_t bdaddr_swapped;
struct sk_buff *skb;
int err;
- skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
- HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ baswap(&bdaddr_swapped, bdaddr);
+
+ skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
+ &bdaddr_swapped, HCI_EV_VENDOR,
+ HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 06e915b57283..e3946f7b736e 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -542,6 +542,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
@@ -3480,13 +3482,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_coredump_qca(struct hci_dev *hdev)
{
+ int err;
static const u8 param[] = { 0x26 };
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
- kfree_skb(skb);
+ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+ if (err < 0)
+		bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
/*
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 8a60ad7acd70..0c9c9ee56592 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -7,7 +7,6 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -226,6 +225,7 @@ struct qca_serdev {
struct qca_power *bt_power;
u32 init_speed;
u32 oper_speed;
+ bool bdaddr_property_broken;
const char *firmware_name;
};
@@ -1672,6 +1672,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
+ if (!hu->serdev)
+ return true;
+
/* BT SoC attached through the serial bus is handled by the serdev driver.
* So we need to use the device handle of the serdev driver to get the
* status of device may wakeup.
@@ -1843,6 +1846,7 @@ static int qca_setup(struct hci_uart *hu)
const char *firmware_name = qca_get_firmware_name(hu);
int ret;
struct qca_btsoc_version ver;
+ struct qca_serdev *qcadev;
const char *soc_name;
ret = qca_check_speeds(hu);
@@ -1904,16 +1908,9 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
-
- /* Set BDA quirk bit for reading BDA value from fwnode property
- * only if that property exist in DT.
- */
- if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
- bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
- } else {
- bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
- }
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->bdaddr_property_broken)
+ set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
hci_set_aosp_capable(hdev);
@@ -1961,8 +1958,10 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
- if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
- hu->hdev->wakeup = qca_wakeup;
+ if (hu->serdev) {
+ if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
+ hu->hdev->wakeup = qca_wakeup;
+ }
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
@@ -2295,6 +2294,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->oper_speed)
BT_DBG("UART will pick default operating speed");
+ qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
+ "qcom,local-bd-address-broken");
+
if (data)
qcadev->btsoc_type = data->soc_type;
else
@@ -2330,16 +2332,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- power_ctrl_enabled = false;
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
- data->soc_type == QCA_WCN7850))
- dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ data->soc_type == QCA_WCN7850)) {
+ dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ return PTR_ERR(qcadev->sw_ctrl);
+ }
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
@@ -2358,10 +2365,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(qcadev->bt_en)) {
- dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
- power_ctrl_enabled = false;
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 89ed6cd6b059..e9cc8b4786fb 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -15,6 +15,8 @@
#include <linux/of_address.h>
#include <linux/device.h>
#include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <asm/cacheflush.h>
#include <asm/cacheinfo.h>
#include <asm/dma-noncoherent.h>
@@ -247,13 +249,49 @@ static irqreturn_t ccache_int_handler(int irq, void *device)
return IRQ_HANDLED;
}
+static int sifive_ccache_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long quirks;
+ int intr_num, rc;
+
+ quirks = (unsigned long)device_get_match_data(dev);
+
+ intr_num = platform_irq_count(pdev);
+ if (!intr_num)
+ return dev_err_probe(dev, -ENODEV, "No interrupts property\n");
+
+ for (int i = 0; i < intr_num; i++) {
+ if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
+ continue;
+
+ g_irq[i] = platform_get_irq(pdev, i);
+ if (g_irq[i] < 0)
+ return g_irq[i];
+
+ rc = devm_request_irq(dev, g_irq[i], ccache_int_handler, 0, "ccache_ecc", NULL);
+ if (rc)
+ return dev_err_probe(dev, rc, "Could not request IRQ %d\n", g_irq[i]);
+ }
+
+ return 0;
+}
+
+static struct platform_driver sifive_ccache_driver = {
+ .probe = sifive_ccache_probe,
+ .driver = {
+ .name = "sifive_ccache",
+ .of_match_table = sifive_ccache_ids,
+ },
+};
+
static int __init sifive_ccache_init(void)
{
struct device_node *np;
struct resource res;
- int i, rc, intr_num;
const struct of_device_id *match;
unsigned long quirks;
+ int rc;
np = of_find_matching_node_and_match(NULL, sifive_ccache_ids, &match);
if (!np)
@@ -277,28 +315,6 @@ static int __init sifive_ccache_init(void)
goto err_unmap;
}
- intr_num = of_property_count_u32_elems(np, "interrupts");
- if (!intr_num) {
- pr_err("No interrupts property\n");
- rc = -ENODEV;
- goto err_unmap;
- }
-
- for (i = 0; i < intr_num; i++) {
- g_irq[i] = irq_of_parse_and_map(np, i);
-
- if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
- continue;
-
- rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
- NULL);
- if (rc) {
- pr_err("Could not request IRQ %d\n", g_irq[i]);
- goto err_free_irq;
- }
- }
- of_node_put(np);
-
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (quirks & QUIRK_NONSTANDARD_CACHE_OPS) {
riscv_cbom_block_size = SIFIVE_CCACHE_LINE_SIZE;
@@ -315,11 +331,15 @@ static int __init sifive_ccache_init(void)
#ifdef CONFIG_DEBUG_FS
setup_sifive_debug();
#endif
+
+ rc = platform_driver_register(&sifive_ccache_driver);
+ if (rc)
+ goto err_unmap;
+
+ of_node_put(np);
+
return 0;
-err_free_irq:
- while (--i >= 0)
- free_irq(g_irq[i], NULL);
err_unmap:
iounmap(ccache_base);
err_node_put:
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 456be28ba67c..2597cb43f438 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
static void __cold _credit_init_bits(size_t bits)
{
- static struct execute_work set_ready;
+ static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- if (static_key_initialized)
- execute_in_process_context(crng_set_ready, &set_ready);
+ if (static_key_initialized && system_unbound_wq)
+ queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -890,8 +890,8 @@ void __init random_init(void)
/*
* If we were initialized by the cpu or bootloader before jump labels
- * are initialized, then we should enable the static branch here, where
- * it's guaranteed that jump labels have been initialized.
+ * or workqueues are initialized, then we should enable the static
+ * branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
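
The random.c change above replaces execute_in_process_context() with a statically initialized work item: DECLARE_WORK() needs no runtime setup, so the item can be queued as soon as workqueues exist. A hedged, illustrative module (not part of the patch) showing the pattern:

```c
/* Illustrative sketch of the static-work pattern adopted above; the
 * names ready_fn/ready_work and the demo module are hypothetical.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static void ready_fn(struct work_struct *work)
{
	pr_info("ready work ran in process context\n");
}

/* Statically initialized: no runtime init required before queueing. */
static DECLARE_WORK(ready_work, ready_fn);

static int __init demo_init(void)
{
	/* Safe only once workqueues are up, hence the patch's
	 * system_unbound_wq check before queueing. */
	queue_work(system_unbound_wq, &ready_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&ready_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");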
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 25371c91a58f..8cca52be993f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires 'clk_rpm_list_lock' and returns with the lock held on
+ * success; on failure the lock is released.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+ int ret;
+ struct clk_core *core, *failed;
+
+ /*
+ * Grab the list lock to prevent any new clks from being registered
+ * or unregistered until clk_pm_runtime_put_all().
+ */
+ mutex_lock(&clk_rpm_list_lock);
+
+ /*
+ * Runtime PM "get" all the devices that are needed for the clks
+ * currently registered. Do this without holding the prepare_lock, to
+ * avoid the deadlock.
+ */
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ ret = clk_pm_runtime_get(core);
+ if (ret) {
+ failed = core;
+ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+ dev_name(failed->dev), failed->name);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ if (core == failed)
+ break;
+
+ clk_pm_runtime_put(core);
+ }
+ mutex_unlock(&clk_rpm_list_lock);
+
+ return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+ clk_pm_runtime_put(core);
+ mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+ struct device *dev = core->dev;
+
+ if (dev && pm_runtime_enabled(dev)) {
+ core->rpm_enabled = true;
+
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_add_head(&core->rpm_node, &clk_rpm_list);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -1381,9 +1469,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
- if (clk_pm_runtime_get(core))
- return;
-
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -1392,8 +1477,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
-
- clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1409,9 +1492,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
- if (clk_pm_runtime_get(core))
- goto unprepare_out;
-
flags = clk_enable_lock();
if (core->enable_count)
@@ -1436,8 +1516,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
- clk_pm_runtime_put(core);
-unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1453,6 +1531,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
+ int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
@@ -1461,6 +1540,13 @@ static int __init clk_disable_unused(void)
pr_info("clk: Disabling unused clocks\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+ /*
+ * Grab the prepare lock to keep the clk topology stable while iterating
+ * over clks.
+ */
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1477,6 +1563,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
return 0;
}
late_initcall_sync(clk_disable_unused);
@@ -3252,9 +3340,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
- clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
- clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -3264,11 +3350,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = s->private;
+ int ret;
seq_puts(s, " enable prepare protect duty hardware connection\n");
seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
clk_prepare_lock();
@@ -3277,6 +3367,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
return 0;
}
@@ -3324,8 +3415,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
struct clk_core *c;
bool first_node = true;
struct hlist_head **lists = s->private;
+ int ret;
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
seq_putc(s, '{');
+
clk_prepare_lock();
for (; *lists; lists++) {
@@ -3338,6 +3435,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
@@ -3981,8 +4079,6 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
-
- kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@@ -4211,6 +4307,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+ struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+ if (core->rpm_enabled) {
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_del(&core->rpm_node);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+
+ clk_core_free_parent_map(core);
+ kfree_const(core->name);
+ kfree(core);
+}
+
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@@ -4231,6 +4343,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
+ kref_init(&core->ref);
+
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@@ -4243,9 +4357,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
- if (dev && pm_runtime_enabled(dev))
- core->rpm_enabled = true;
core->dev = dev;
+ clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@@ -4285,12 +4398,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
- clk_core_free_parent_map(core);
fail_parents:
fail_ops:
- kfree_const(core->name);
fail_name:
- kfree(core);
+ kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@@ -4370,18 +4481,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
- struct clk_core *core = container_of(ref, struct clk_core, ref);
-
- lockdep_assert_held(&prepare_lock);
-
- clk_core_free_parent_map(core);
- kfree_const(core->name);
- kfree(core);
-}
-
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
@@ -4472,7 +4571,8 @@ void clk_unregister(struct clk *clk)
if (ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
- goto unlock;
+ clk_prepare_unlock();
+ return;
}
/*
* Assign empty clock ops for consumers that might still hold
@@ -4506,11 +4606,10 @@ void clk_unregister(struct clk *clk)
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
+ clk_prepare_unlock();
kref_put(&clk->core->ref, __clk_release);
free_clk(clk);
-unlock:
- clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
@@ -4669,13 +4768,11 @@ void __clk_put(struct clk *clk)
if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
- owner = clk->core->owner;
- kref_put(&clk->core->ref, __clk_release);
-
clk_prepare_unlock();
+ owner = clk->core->owner;
+ kref_put(&clk->core->ref, __clk_release);
module_put(owner);
-
free_clk(clk);
}
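
The clk.c series above breaks a lock-ordering deadlock: all runtime-PM references are taken *before* the prepare_lock, so a device's resume callback that itself wants the prepare_lock can never deadlock against clk_disable_unused(). A userspace pthread analogue of the ordering (names and the three-device list are illustrative):

```c
/* Analogue of clk_pm_runtime_get_all()/put_all(): pin the device list,
 * take all resume references, only then take the topology lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rpm_list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t prepare_lock = PTHREAD_MUTEX_INITIALIZER;
static int rpm_refs[3];

static int runtime_get_all(void)
{
	pthread_mutex_lock(&rpm_list_lock);	/* pins the device list */
	for (int i = 0; i < 3; i++)
		rpm_refs[i]++;			/* may sleep in real code */
	return 0;				/* list lock held on success */
}

static void runtime_put_all(void)
{
	for (int i = 0; i < 3; i++)
		rpm_refs[i]--;
	pthread_mutex_unlock(&rpm_list_lock);
}

int main(void)
{
	if (runtime_get_all())
		return 1;
	pthread_mutex_lock(&prepare_lock);	/* topology stable here */
	printf("walk clk tree without triggering runtime PM\n");
	pthread_mutex_unlock(&prepare_lock);
	runtime_put_all();
	return 0;
}
```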
diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c
index 449041f8abbc..c8c023afe3e5 100644
--- a/drivers/clk/mediatek/clk-mt7988-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c
@@ -156,7 +156,7 @@ static const struct mtk_gate infra_clks[] = {
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
"csw_infra_f26m_sel", 8),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
- "csw_infra_f26m_sel", 9),
+ "infra_pcie_peri_ck_26m_ck_p3", 9),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
"csw_infra_f26m_sel", 10),
/* INFRA1 */
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 2e55368dc4d8..bd37ab4d1a9b 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-mtk.h"
@@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
}
+
+ devm_pm_runtime_enable(&pdev->dev);
+ /*
+	 * Do a pm_runtime_resume_and_get() to work around a possible
+ * deadlock between clk_register() and the genpd framework.
+ */
+ r = pm_runtime_resume_and_get(&pdev->dev);
+ if (r)
+ return r;
+
/* Calculate how many clk_hw_onecell_data entries to allocate */
num_clks = mcd->num_clks + mcd->num_composite_clks;
num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
@@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
goto unregister_clks;
}
+ pm_runtime_put(&pdev->dev);
+
return r;
unregister_clks:
@@ -604,6 +617,8 @@ free_data:
free_base:
if (mcd->shared_io && base)
iounmap(base);
+
+ pm_runtime_put(&pdev->dev);
return r;
}
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4536ed43f65b..84dce5184a77 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2)
- return -ENODEV;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep_desc) ||
- usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
+ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+ int ret;
- if (usb_endpoint_is_int_out(ep_desc) ||
- usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
+ if (devpriv->model == VMK8061_MODEL)
+ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+ &ep_tx_desc, NULL, NULL);
+ else
+ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+ &ep_rx_desc, &ep_tx_desc);
- if (!devpriv->ep_rx || !devpriv->ep_tx)
+ if (ret)
return -ENODEV;
+ devpriv->ep_rx = ep_rx_desc;
+ devpriv->ep_tx = ep_tx_desc;
+
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;
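
The vmk80xx conversion above replaces an open-coded endpoint walk with usb_find_common_endpoints(), which scans the altsetting once and fills only the requested endpoint slots, returning an error when any requested type is missing. A hedged sketch against that in-kernel API (demo_find_eps is an illustrative name):

```c
#include <linux/usb.h>

/* Sketch: find one bulk-in/bulk-out pair, as the VMK8061 path does. */
static int demo_find_eps(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int ret;

	/* Parameter order: bulk_in, bulk_out, int_in, int_out */
	ret = usb_find_common_endpoints(alt, &bulk_in, &bulk_out,
					NULL, NULL);
	if (ret)
		return ret;	/* a requested endpoint type is missing */

	return usb_endpoint_maxp(bulk_in) && usb_endpoint_maxp(bulk_out) ?
	       0 : -EINVAL;
}
```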
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index f44efbb89c34..2102377f727b 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1090,7 +1090,7 @@ static int __sev_snp_init_locked(int *error)
void *arg = &data;
int cmd, rc = 0;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return -ENODEV;
sev = psp->sev_data;
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 1cd304de5388..b2191ade9011 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -806,6 +806,8 @@ static int save_iaa_wq(struct idxd_wq *wq)
return -EINVAL;
cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
out:
return 0;
}
@@ -821,10 +823,12 @@ static void remove_iaa_wq(struct idxd_wq *wq)
}
}
- if (nr_iaa)
+ if (nr_iaa) {
cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
- else
- cpus_per_iaa = 0;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
+ } else
+ cpus_per_iaa = 1;
}
static int wq_table_add_wqs(int iaa, int cpu)
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 67998dbd1d46..5f3c9c5529b9 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -144,17 +144,4 @@ config CXL_REGION_INVALIDATION_TEST
If unsure, or if this kernel is meant for production environments,
say N.
-config CXL_PMU
- tristate "CXL Performance Monitoring Unit"
- default CXL_BUS
- depends on PERF_EVENTS
- help
- Support performance monitoring as defined in CXL rev 3.0
- section 13.2: Performance Monitoring. CXL components may have
- one or more CXL Performance Monitoring Units (CPMUs).
-
- Say 'y/m' to enable a driver that will attach to performance
- monitoring units and provide standard perf based interfaces.
-
- If unsure say 'm'.
endif
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index af5cb818f84d..cb8c155a2c9b 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -525,22 +525,11 @@ static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
u32 uid;
- int rc;
if (kstrtou32(acpi_device_uid(hb), 0, &uid))
return -EINVAL;
- rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
- if (rc < 0)
- return rc;
-
- /* Adjust back to picoseconds from nanoseconds */
- for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
- dport->hb_coord[i].read_latency *= 1000;
- dport->hb_coord[i].write_latency *= 1000;
- }
-
- return 0;
+ return acpi_get_genport_coordinates(uid, dport->coord);
}
static int add_host_bridge_dport(struct device *match, void *arg)
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index eddbbe21450c..bb83867d9fec 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -14,12 +14,42 @@
struct dsmas_entry {
struct range dpa_range;
u8 handle;
- struct access_coordinate coord;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
int entries;
int qos_class;
};
+static u32 cdat_normalize(u16 entry, u64 base, u8 type)
+{
+ u32 value;
+
+ /*
+ * Check for invalid and overflow values
+ */
+ if (entry == 0xffff || !entry)
+ return 0;
+ else if (base > (UINT_MAX / (entry)))
+ return 0;
+
+ /*
+ * CDAT fields follow the format of HMAT fields. See table 5 Device
+ * Scoped Latency and Bandwidth Information Structure in Coherent Device
+ * Attribute Table (CDAT) Specification v1.01.
+ */
+ value = entry * base;
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ value = DIV_ROUND_UP(value, 1000);
+ break;
+ default:
+ break;
+ }
+ return value;
+}
+
static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
@@ -58,8 +88,8 @@ static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
return 0;
}
-static void cxl_access_coordinate_set(struct access_coordinate *coord,
- int access, unsigned int val)
+static void __cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
{
switch (access) {
case ACPI_HMAT_ACCESS_LATENCY:
@@ -85,6 +115,13 @@ static void cxl_access_coordinate_set(struct access_coordinate *coord,
}
}
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ __cxl_access_coordinate_set(&coord[i], access, val);
+}
+
static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
@@ -97,7 +134,6 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
__le16 le_val;
u64 val;
u16 len;
- int rc;
len = le16_to_cpu((__force __le16)hdr->length);
if (len != size || (unsigned long)hdr + len > end) {
@@ -124,12 +160,10 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
le_base = (__force __le64)dslbis->entry_base_unit;
le_val = (__force __le16)dslbis->entry[0];
- rc = check_mul_overflow(le64_to_cpu(le_base),
- le16_to_cpu(le_val), &val);
- if (rc)
- pr_warn("DSLBIS value overflowed.\n");
+ val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+ dslbis->data_type);
- cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+ cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);
return 0;
}
@@ -163,25 +197,18 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port,
static int cxl_port_perf_data_calculate(struct cxl_port *port,
struct xarray *dsmas_xa)
{
- struct access_coordinate ep_c;
- struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
struct dsmas_entry *dent;
int valid_entries = 0;
unsigned long index;
int rc;
- rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
+ rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
if (rc) {
dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
return rc;
}
- rc = cxl_hb_get_perf_coordinates(port, coord);
- if (rc) {
- dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
- return rc;
- }
-
struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
if (!cxl_root)
@@ -193,18 +220,10 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
xa_for_each(dsmas_xa, index, dent) {
int qos_class;
- cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
- /*
- * Keeping the host bridge coordinates separate from the dsmas
- * coordinates in order to allow calculation of access class
- * 0 and 1 for region later.
- */
- cxl_coordinates_combine(&coord[ACCESS_COORDINATE_CPU],
- &coord[ACCESS_COORDINATE_CPU],
- &dent->coord);
+ cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
dent->entries = 1;
rc = cxl_root->ops->qos_class(cxl_root,
- &coord[ACCESS_COORDINATE_CPU],
+ &dent->coord[ACCESS_COORDINATE_CPU],
1, &qos_class);
if (rc != 1)
continue;
@@ -222,14 +241,17 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
struct cxl_dpa_perf *dpa_perf)
{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ dpa_perf->coord[i] = dent->coord[i];
dpa_perf->dpa_range = dent->dpa_range;
- dpa_perf->coord = dent->coord;
dpa_perf->qos_class = dent->qos_class;
dev_dbg(dev,
"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
dent->dpa_range.start, dpa_perf->qos_class,
- dent->coord.read_bandwidth, dent->coord.write_bandwidth,
- dent->coord.read_latency, dent->coord.write_latency);
+ dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
+ dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
+ dent->coord[ACCESS_COORDINATE_CPU].read_latency,
+ dent->coord[ACCESS_COORDINATE_CPU].write_latency);
}
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
@@ -461,17 +483,16 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
-
- if (check_mul_overflow(le64_to_cpu(le_base),
- le16_to_cpu(le_val), &val))
- dev_warn(dev, "SSLBIS value overflowed!\n");
+ val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+ sslbis->data_type);
xa_for_each(&port->dports, index, dport) {
if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
- dsp_id == dport->port_id)
- cxl_access_coordinate_set(&dport->sw_coord,
+ dsp_id == dport->port_id) {
+ cxl_access_coordinate_set(dport->coord,
sslbis->data_type,
val);
+ }
}
}
@@ -493,6 +514,21 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+static void __cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2)
+{
+ if (c1->write_bandwidth && c2->write_bandwidth)
+ out->write_bandwidth = min(c1->write_bandwidth,
+ c2->write_bandwidth);
+ out->write_latency = c1->write_latency + c2->write_latency;
+
+ if (c1->read_bandwidth && c2->read_bandwidth)
+ out->read_bandwidth = min(c1->read_bandwidth,
+ c2->read_bandwidth);
+ out->read_latency = c1->read_latency + c2->read_latency;
+}
+
/**
* cxl_coordinates_combine - Combine the two input coordinates
*
@@ -504,15 +540,8 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c1,
struct access_coordinate *c2)
{
- if (c1->write_bandwidth && c2->write_bandwidth)
- out->write_bandwidth = min(c1->write_bandwidth,
- c2->write_bandwidth);
- out->write_latency = c1->write_latency + c2->write_latency;
-
- if (c1->read_bandwidth && c2->read_bandwidth)
- out->read_bandwidth = min(c1->read_bandwidth,
- c2->read_bandwidth);
- out->read_latency = c1->read_latency + c2->read_latency;
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ __cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
}
MODULE_IMPORT_NS(CXL);
@@ -521,17 +550,13 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_port *port = cxlmd->endpoint;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
- struct access_coordinate coord;
struct range dpa = {
.start = cxled->dpa_res->start,
.end = cxled->dpa_res->end,
};
struct cxl_dpa_perf *perf;
- int rc;
switch (cxlr->mode) {
case CXL_DECODER_RAM:
@@ -549,35 +574,16 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
if (!range_contains(&perf->dpa_range, &dpa))
return;
- rc = cxl_hb_get_perf_coordinates(port, hb_coord);
- if (rc) {
- dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
- return;
- }
-
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
- /* Pickup the host bridge coords */
- cxl_coordinates_combine(&coord, &hb_coord[i], &perf->coord);
-
/* Get total bandwidth and the worst latency for the cxl region */
cxlr->coord[i].read_latency = max_t(unsigned int,
cxlr->coord[i].read_latency,
- coord.read_latency);
+ perf->coord[i].read_latency);
cxlr->coord[i].write_latency = max_t(unsigned int,
cxlr->coord[i].write_latency,
- coord.write_latency);
- cxlr->coord[i].read_bandwidth += coord.read_bandwidth;
- cxlr->coord[i].write_bandwidth += coord.write_bandwidth;
-
- /*
- * Convert latency to nanosec from picosec to be consistent
- * with the resulting latency coordinates computed by the
- * HMAT_REPORTING code.
- */
- cxlr->coord[i].read_latency =
- DIV_ROUND_UP(cxlr->coord[i].read_latency, 1000);
- cxlr->coord[i].write_latency =
- DIV_ROUND_UP(cxlr->coord[i].write_latency, 1000);
+ perf->coord[i].write_latency);
+ cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
+ cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
}
}
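
The cdat_normalize() helper introduced earlier in this file replaces the check_mul_overflow() warnings: it rejects the 0xffff "invalid" marker, guards the entry-times-base multiplication against u32 overflow, and rounds picosecond latencies up to nanoseconds. A userspace sketch of that arithmetic (values are illustrative):

```c
#include <assert.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t normalize(uint16_t entry, uint64_t base, int is_latency)
{
	uint64_t value;

	if (entry == 0xffff || !entry)	/* invalid marker or zero */
		return 0;
	if (base > (UINT32_MAX / entry))	/* product would overflow u32 */
		return 0;

	value = (uint64_t)entry * base;
	return is_latency ? DIV_ROUND_UP(value, 1000) : value;
}

int main(void)
{
	assert(normalize(0xffff, 10, 1) == 0);	/* invalid marker */
	assert(normalize(2, 1500, 1) == 3);	/* 3000 ps -> 3 ns */
	assert(normalize(2, 1500, 0) == 3000);	/* bandwidth kept as-is */
	return 0;
}
```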
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 9adda4795eb7..65185c9fa001 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -915,7 +915,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
payload->handles[i++] = gen->hdr.handle;
dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
- le16_to_cpu(payload->handles[i]));
+ le16_to_cpu(payload->handles[i - 1]));
if (i == max_handles) {
payload->nr_recs = i;
@@ -946,24 +946,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
- struct cxl_mbox_cmd mbox_cmd;
u8 log_type = type;
u16 nr_rec;
mutex_lock(&mds->event.log_lock);
payload = mds->event.buf;
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
- .payload_in = &log_type,
- .size_in = sizeof(log_type),
- .payload_out = payload,
- .size_out = mds->payload_size,
- .min_out = struct_size(payload, records, 0),
- };
-
do {
int rc, i;
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+ .payload_in = &log_type,
+ .size_in = sizeof(log_type),
+ .payload_out = payload,
+ .size_out = mds->payload_size,
+ .min_out = struct_size(payload, records, 0),
+ };
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
@@ -1296,7 +1294,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
- struct cxl_mbox_cmd mbox_cmd;
int nr_records = 0;
int rc;
@@ -1308,16 +1305,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
pi.offset = cpu_to_le64(offset);
pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_POISON,
- .size_in = sizeof(pi),
- .payload_in = &pi,
- .size_out = mds->payload_size,
- .payload_out = po,
- .min_out = struct_size(po, record, 0),
- };
-
do {
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_GET_POISON,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = mds->payload_size,
+ .payload_out = po,
+ .min_out = struct_size(po, record, 0),
+ };
+
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
break;
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 2b0cab556072..762783bb091a 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -2133,36 +2133,44 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
-/**
- * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
- * and host bridge
- *
- * @port: endpoint cxl_port
- * @coord: output access coordinates
- *
- * Return: errno on failure, 0 on success.
- */
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
- struct access_coordinate *coord)
+static void add_latency(struct access_coordinate *c, long latency)
{
- struct cxl_port *iter = port;
- struct cxl_dport *dport;
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].write_latency += latency;
+ c[i].read_latency += latency;
+ }
+}
- if (!is_cxl_endpoint(port))
- return -EINVAL;
+static bool coordinates_valid(struct access_coordinate *c)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ if (c[i].read_bandwidth && c[i].write_bandwidth &&
+ c[i].read_latency && c[i].write_latency)
+ continue;
+ return false;
+ }
- dport = iter->parent_dport;
- while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
- iter = to_cxl_port(iter->dev.parent);
- dport = iter->parent_dport;
+ return true;
+}
+
+static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
+ c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
}
+}
- coord[ACCESS_COORDINATE_LOCAL] =
- dport->hb_coord[ACCESS_COORDINATE_LOCAL];
- coord[ACCESS_COORDINATE_CPU] =
- dport->hb_coord[ACCESS_COORDINATE_CPU];
+static void set_access_coordinates(struct access_coordinate *out,
+ struct access_coordinate *in)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ out[i] = in[i];
+}
- return 0;
+static bool parent_port_is_cxl_root(struct cxl_port *port)
+{
+ return is_cxl_root(to_cxl_port(port->dev.parent));
}
/**
@@ -2176,35 +2184,53 @@ int cxl_hb_get_perf_coordinates(struct cxl_port *port,
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord)
{
- struct access_coordinate c = {
- .read_bandwidth = UINT_MAX,
- .write_bandwidth = UINT_MAX,
+ struct access_coordinate c[] = {
+ {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ },
+ {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ },
};
struct cxl_port *iter = port;
struct cxl_dport *dport;
struct pci_dev *pdev;
unsigned int bw;
+ bool is_cxl_root;
if (!is_cxl_endpoint(port))
return -EINVAL;
- dport = iter->parent_dport;
-
/*
- * Exit the loop when the parent port of the current port is cxl root.
- * The iterative loop starts at the endpoint and gathers the
- * latency of the CXL link from the current iter to the next downstream
- * port each iteration. If the parent is cxl root then there is
- * nothing to gather.
+ * Exit the loop when the parent port of the current iter port is cxl
+ * root. The iterative loop starts at the endpoint and gathers the
+ * latency of the CXL link from the current device/port to the connected
+ * downstream port each iteration.
*/
- while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
- cxl_coordinates_combine(&c, &c, &dport->sw_coord);
- c.write_latency += dport->link_latency;
- c.read_latency += dport->link_latency;
-
- iter = to_cxl_port(iter->dev.parent);
+ do {
dport = iter->parent_dport;
- }
+ iter = to_cxl_port(iter->dev.parent);
+ is_cxl_root = parent_port_is_cxl_root(iter);
+
+ /*
+ * There's no valid access_coordinate for a root port since RPs do not
+		 * have CDAT and therefore need to be skipped.
+ */
+ if (!is_cxl_root) {
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+ cxl_coordinates_combine(c, c, dport->coord);
+ }
+ add_latency(c, dport->link_latency);
+ } while (!is_cxl_root);
+
+ dport = iter->parent_dport;
+ /* Retrieve HB coords */
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+ cxl_coordinates_combine(c, c, dport->coord);
/* Get the calculated PCI paths bandwidth */
pdev = to_pci_dev(port->uport_dev->parent);
@@ -2213,10 +2239,8 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
return -ENXIO;
bw /= BITS_PER_BYTE;
- c.write_bandwidth = min(c.write_bandwidth, bw);
- c.read_bandwidth = min(c.read_bandwidth, bw);
-
- *coord = c;
+ set_min_bandwidth(c, bw);
+ set_access_coordinates(coord, c);
return 0;
}
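
The endpoint-to-root walk above combines one access_coordinate per hop with the rule the CXL core uses throughout: path bandwidth is the minimum over all links, path latency is the sum. A standalone sketch of that combine step for the read direction (struct and values are illustrative):

```c
#include <assert.h>

struct coord { unsigned int read_bw, read_lat; };

/* Per-hop combine: min bandwidth, summed latency. */
static void combine(struct coord *out, const struct coord *hop)
{
	if (out->read_bw && hop->read_bw)
		out->read_bw = hop->read_bw < out->read_bw ?
			       hop->read_bw : out->read_bw;
	out->read_lat += hop->read_lat;
}

int main(void)
{
	struct coord path = { .read_bw = 4294967295u, .read_lat = 0 };
	struct coord link1 = { 8000, 200 }, link2 = { 4000, 150 };

	combine(&path, &link1);
	combine(&path, &link2);
	assert(path.read_bw == 4000 && path.read_lat == 350);
	return 0;
}
```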
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 372786f80955..3c42f984eeaf 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
{
+ u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
u64 offset = ((u64)reg_hi << 32) |
(reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
if (offset > pci_resource_len(pdev, bar)) {
dev_warn(&pdev->dev,
"BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
- &pdev->resource[bar], &offset, map->reg_type);
+ &pdev->resource[bar], &offset, reg_type);
return false;
}
- map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+ map->reg_type = reg_type;
map->resource = pci_resource_start(pdev, bar) + offset;
map->max_size = pci_resource_len(pdev, bar) - offset;
return true;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 534e25e2f0a4..036d17db68e0 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -663,8 +663,7 @@ struct cxl_rcrb_info {
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
* @regs: Dport parsed register blocks
- * @sw_coord: access coordinates (performance) for switch from CDAT
- * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @coord: access coordinates (bandwidth and latency performance attributes)
* @link_latency: calculated PCIe downstream latency
*/
struct cxl_dport {
@@ -675,8 +674,7 @@ struct cxl_dport {
bool rch;
struct cxl_port *port;
struct cxl_regs regs;
- struct access_coordinate sw_coord;
- struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
long link_latency;
};
@@ -884,8 +882,6 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
- struct access_coordinate *coord);
void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 20fb3b35e89e..36cee9c30ceb 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -401,7 +401,7 @@ enum cxl_devtype {
*/
struct cxl_dpa_perf {
struct range dpa_range;
- struct access_coordinate coord;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
int qos_class;
};
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 8fe5aa67b167..8892bc701a66 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -35,12 +35,35 @@
static inline int is_dma_buf_file(struct file *);
-struct dma_buf_list {
- struct list_head head;
- struct mutex lock;
-};
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static DEFINE_MUTEX(debugfs_list_mutex);
+static LIST_HEAD(debugfs_list);
-static struct dma_buf_list db_list;
+static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+{
+ mutex_lock(&debugfs_list_mutex);
+ list_add(&dmabuf->list_node, &debugfs_list);
+ mutex_unlock(&debugfs_list_mutex);
+}
+
+static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+{
+ if (!dmabuf)
+ return;
+
+ mutex_lock(&debugfs_list_mutex);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&debugfs_list_mutex);
+}
+#else
+static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+{
+}
+
+static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+{
+}
+#endif
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
@@ -89,17 +112,10 @@ static void dma_buf_release(struct dentry *dentry)
static int dma_buf_file_release(struct inode *inode, struct file *file)
{
- struct dma_buf *dmabuf;
-
if (!is_dma_buf_file(file))
return -EINVAL;
- dmabuf = file->private_data;
- if (dmabuf) {
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
- }
+ __dma_buf_debugfs_list_del(file->private_data);
return 0;
}
@@ -672,9 +688,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- mutex_lock(&db_list.lock);
- list_add(&dmabuf->list_node, &db_list.head);
- mutex_unlock(&db_list.lock);
+ __dma_buf_debugfs_list_add(dmabuf);
return dmabuf;
@@ -1611,7 +1625,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
size_t size = 0;
int ret;
- ret = mutex_lock_interruptible(&db_list.lock);
+ ret = mutex_lock_interruptible(&debugfs_list_mutex);
if (ret)
return ret;
@@ -1620,7 +1634,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
- list_for_each_entry(buf_obj, &db_list.head, list_node) {
+ list_for_each_entry(buf_obj, &debugfs_list, list_node) {
ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
if (ret)
@@ -1657,11 +1671,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
- mutex_unlock(&db_list.lock);
+ mutex_unlock(&debugfs_list_mutex);
return 0;
error_unlock:
- mutex_unlock(&db_list.lock);
+ mutex_unlock(&debugfs_list_mutex);
return ret;
}
@@ -1718,8 +1732,6 @@ static int __init dma_buf_init(void)
if (IS_ERR(dma_buf_mnt))
return PTR_ERR(dma_buf_mnt);
- mutex_init(&db_list.lock);
- INIT_LIST_HEAD(&db_list.head);
dma_buf_init_debugfs();
return 0;
}
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index 9c2a0c082a76..ed4b323886e4 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
return -ENOMEM;
chain = mock_chain(NULL, f, 1);
- if (!chain)
+ if (chain)
+ dma_fence_enable_sw_signaling(chain);
+ else
err = -ENOMEM;
- dma_fence_enable_sw_signaling(chain);
-
dma_fence_signal(f);
dma_fence_put(f);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969d7d..1398814d8fbb 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
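
The idma64 guard above relies on a bus property: a MMIO read from a powered-off device returns all ones, so an all-ones status on a shared interrupt line means "not ours" and the handler must bail out with IRQ_NONE. A tiny sketch of that dispatch decision:

```c
#include <assert.h>
#include <stdint.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static enum irqreturn isr(uint32_t status)
{
	if (status == UINT32_MAX)	/* GENMASK(31, 0) in the patch */
		return IRQ_NONE;	/* device is off; IRQ is not ours */
	/* ... normal per-channel dispatch would go here ... */
	return IRQ_HANDLED;
}

int main(void)
{
	assert(isr(0xffffffffu) == IRQ_NONE);
	assert(isr(0x1) == IRQ_HANDLED);
	return 0;
}
```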
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 8078ab9acfbc..c095a2c8f659 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- spin_unlock(&evl->lock);
-
drain_workqueue(wq->wq);
+ mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee676f3..ad4245cb301d 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ecfdf4a8f1f8..c41ef195eeb9 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index a4099a1e2340..7b98944135eb 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -293,7 +293,7 @@ struct idxd_driver_data {
struct idxd_evl {
/* Lock to protect event log access. */
- spinlock_t lock;
+ struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 4954adc6bb60..264c4e47d7cc 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -354,7 +354,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
- spin_lock_init(&evl->lock);
+ mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 348aa21389a9..8dc029c86551 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d604262..5e94247e1ea7 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 4e76c4ec2d39..e001f4f7aa64 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
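
Both owl-dma hunks fix the same read-modify-write bug: the value written back must be the modified register copy (`regval`), not the bit mask (`val`), or every update clobbers the other bits. A sketch of the corrected sequence with a fake register:

```c
#include <assert.h>
#include <stdint.h>

static uint32_t reg = 0xf0;	/* stands in for the MMIO register */

static void update(uint32_t mask, int set)
{
	uint32_t regval = reg;		/* readl() */

	if (set)
		regval |= mask;
	else
		regval &= ~mask;
	reg = regval;			/* writel(regval), not writel(mask) */
}

int main(void)
{
	update(0x0f, 1);
	assert(reg == 0xff);	/* the old bug would have left reg == 0x0f */
	update(0xf0, 0);
	assert(reg == 0x0f);
	return 0;
}
```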
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5f6d7f1e095f..ad8e3da1b2cd 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1053,9 +1053,6 @@ static bool _trigger(struct pl330_thread *thrd)
thrd->req_running = idx;
- if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
- UNTIL(thrd, PL330_STATE_WFP);
-
return true;
}
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 88547a23825b..3642508e88bb 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
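
The tegra186 early return above fixes a modulo corner case: when the transfer is complete, `bytes_xfer % bytes_req` is 0 and the old formula reported a full `bytes_req` of residue instead of 0. A sketch of the fixed arithmetic:

```c
#include <assert.h>

static unsigned int residual(unsigned int req, unsigned int xfer)
{
	if (req == xfer)
		return 0;		/* the added early return */
	return req - (xfer % req);
}

int main(void)
{
	assert(residual(4096, 4096) == 0);	/* old code returned 4096 */
	assert(residual(4096, 1024) == 3072);
	return 0;
}
```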
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index 98f5f6fb9ff9..6ad08878e938 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY BIT(0)
+
#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 170017ff2aad..313b217388fe 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
+ struct completion last_interrupt;
+ bool stop_requested;
};
/**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+ xchan->stop_requested = false;
+ reinit_completion(&xchan->last_interrupt);
return 0;
}
@@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
int ret;
- u32 val;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
@@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
CHAN_CTRL_RUN_STOP);
if (ret)
return ret;
-
- /* Clear the channel status register */
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
- if (ret)
- return ret;
-
- return 0;
+ return ret;
}
/**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
+ xchan->stop_requested = false;
+ init_completion(&xchan->last_interrupt);
ret = xdma_channel_init(xchan);
if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
xdma_chan->busy = false;
+ xdma_chan->stop_requested = true;
vd = vchan_next_desc(&xdma_chan->vchan);
if (vd) {
list_del(&vd->node);
@@ -542,17 +542,26 @@ static int xdma_terminate_all(struct dma_chan *chan)
static void xdma_synchronize(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ int st = 0;
+
+ /* If the engine is still running, wait for the last interrupt */
+ regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+ if (st & XDMA_CHAN_STATUS_BUSY)
+ wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
vchan_synchronize(&xdma_chan->vchan);
}
/**
- * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
- * @sw_desc: tx descriptor state container
- * @src_addr: Value for a ->src_addr field of a first descriptor
- * @dst_addr: Value for a ->dst_addr field of a first descriptor
- * @size: Total size of a contiguous memory block
- * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
*/
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
u64 dst_addr, u32 size, u32 filled_descs_num)
@@ -704,7 +713,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
desc_num = 0;
for (i = 0; i < periods; i++) {
desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
- addr += i * period_size;
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -876,6 +885,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ if (xchan->stop_requested)
+ complete(&xchan->last_interrupt);
+
spin_lock(&xchan->vchan.lock);
/* get submitted request */
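Taken together, the xdma changes implement a quiesce handshake: terminate_all() raises stop_requested, the ISR completes last_interrupt when that flag is set, and synchronize() waits for it only while the engine still reports busy. A hedged kernel-style sketch of the shape (field and function names are illustrative, not the driver's):

#include <linux/completion.h>
#include <linux/jiffies.h>

struct demo_chan {
	struct completion last_interrupt;
	bool stop_requested;
};

static void demo_terminate(struct demo_chan *c)
{
	c->stop_requested = true;	/* under the channel lock in the driver */
}

static void demo_isr(struct demo_chan *c)
{
	if (c->stop_requested)
		complete(&c->last_interrupt);	/* wake the synchronizer */
	/* ... normal descriptor completion ... */
}

static void demo_synchronize(struct demo_chan *c, bool engine_busy)
{
	if (engine_busy)	/* only wait if hardware is still running */
		wait_for_completion_timeout(&c->last_interrupt,
					    msecs_to_jiffies(1000));
}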
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b82815e64d24..eb0637d90342 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
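The dpdma hunks all enforce the ordering documented in the new @lock comment: chan->lock is the outer lock and vchan.lock the inner one, so the queue-transfer path always runs with both held. A minimal sketch of the nesting (illustrative types):

#include <linux/spinlock.h>

struct demo_dpdma_chan {
	spinlock_t lock;			/* outer, irq-safe */
	struct { spinlock_t lock; } vchan;	/* inner (virt-dma channel) */
};

static void demo_issue_pending(struct demo_dpdma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);	/* outer first */
	spin_lock(&chan->vchan.lock);		/* irqs already disabled */

	/* ... promote pending descriptor, queue next transfer ... */

	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}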
diff --git a/drivers/dpll/Kconfig b/drivers/dpll/Kconfig
index a4cae73f20d3..20607ed54243 100644
--- a/drivers/dpll/Kconfig
+++ b/drivers/dpll/Kconfig
@@ -4,4 +4,4 @@
#
config DPLL
- bool
+ bool
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 64eaca80d736..d0f6693ca142 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -42,6 +42,7 @@ struct dpll_pin_registration {
struct list_head list;
const struct dpll_pin_ops *ops;
void *priv;
+ void *cookie;
};
struct dpll_device *dpll_device_get_by_id(int id)
@@ -54,12 +55,14 @@ struct dpll_device *dpll_device_get_by_id(int id)
static struct dpll_pin_registration *
dpll_pin_registration_find(struct dpll_pin_ref *ref,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
list_for_each_entry(reg, &ref->registration_list, list) {
- if (reg->ops == ops && reg->priv == priv)
+ if (reg->ops == ops && reg->priv == priv &&
+ reg->cookie == cookie)
return reg;
}
return NULL;
@@ -67,7 +70,8 @@ dpll_pin_registration_find(struct dpll_pin_ref *ref,
static int
dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -78,7 +82,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -111,6 +115,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -119,7 +124,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -128,7 +134,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return -EINVAL;
list_del(&reg->list);
@@ -146,7 +152,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
static int
dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -157,7 +163,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -190,6 +196,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -199,7 +206,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
static void
dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -208,7 +215,7 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return;
list_del(&reg->list);
@@ -594,14 +601,14 @@ EXPORT_SYMBOL_GPL(dpll_pin_put);
static int
__dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
int ret;
- ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv);
+ ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv, cookie);
if (ret)
return ret;
- ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv);
+ ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv, cookie);
if (ret)
goto ref_pin_del;
xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
@@ -610,7 +617,7 @@ __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
return ret;
ref_pin_del:
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
return ret;
}
@@ -642,7 +649,7 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
dpll->clock_id == pin->clock_id)))
ret = -EINVAL;
else
- ret = __dpll_pin_register(dpll, pin, ops, priv);
+ ret = __dpll_pin_register(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
return ret;
@@ -651,11 +658,11 @@ EXPORT_SYMBOL_GPL(dpll_pin_register);
static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
ASSERT_DPLL_PIN_REGISTERED(pin);
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
- dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
+ dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
if (xa_empty(&pin->dpll_refs))
xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
}
@@ -680,7 +687,7 @@ void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- __dpll_pin_unregister(dpll, pin, ops, priv);
+ __dpll_pin_unregister(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_unregister);
@@ -716,12 +723,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
return -EINVAL;
mutex_lock(&dpll_lock);
- ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+ ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
if (ret)
goto unlock;
refcount_inc(&pin->refcount);
xa_for_each(&parent->dpll_refs, i, ref) {
- ret = __dpll_pin_register(ref->dpll, pin, ops, priv);
+ ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
if (ret) {
stop = i;
goto dpll_unregister;
@@ -735,11 +742,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
dpll_unregister:
xa_for_each(&parent->dpll_refs, i, ref)
if (i < stop) {
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv,
+ parent);
dpll_pin_delete_ntf(pin);
}
refcount_dec(&pin->refcount);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
unlock:
mutex_unlock(&dpll_lock);
return ret;
@@ -764,10 +772,10 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
refcount_dec(&pin->refcount);
xa_for_each(&pin->dpll_refs, i, ref)
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
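The cookie threads a third key through every registration lookup. The point shows in dpll_pin_on_pin_register(): a pin can be registered on the same dpll both directly (cookie NULL) and via a parent (cookie = parent pointer) with identical ops and priv, and without the cookie the two registrations would collide. A sketch of the comparison (illustrative struct, not the dpll code):

#include <stdbool.h>
#include <stddef.h>

struct demo_registration {
	const void *ops;
	void *priv;
	void *cookie;	/* NULL for direct, parent/pin pointer otherwise */
};

static bool demo_same_registration(const struct demo_registration *a,
				   const struct demo_registration *b)
{
	/* ops and priv alone are not unique; the cookie disambiguates */
	return a->ops == b->ops && a->priv == b->priv &&
	       a->cookie == b->cookie;
}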
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7bc71f4be64a..38d19410a2be 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2060,6 +2060,8 @@ static void bus_reset_work(struct work_struct *work)
ohci->generation = generation;
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+ if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
+ reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
@@ -2125,12 +2127,14 @@ static irqreturn_t irq_handler(int irq, void *data)
return IRQ_NONE;
/*
- * busReset and postedWriteErr must not be cleared yet
+ * busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
*/
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
log_irqs(ohci, event);
+ if (event & OHCI1394_busReset)
+ reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
if (event & OHCI1394_selfIDComplete)
queue_work(selfid_workqueue, &ohci->bus_reset_work);
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index f2556a8e9401..9bc2e10381af 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -790,7 +790,7 @@ static void ffa_notification_info_get(void)
part_id = packed_id_list[ids_processed++];
- if (!ids_count[list]) { /* Global Notification */
+ if (ids_count[list] == 1) { /* Global Notification */
__do_sched_recv_cb(part_id, 0, false);
continue;
}
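The fix reflects how the notification info lists are packed: each list begins with a partition ID, followed by ids_count - 1 vCPU IDs, so a global (non-per-vCPU) notification is a list of exactly one ID, not a count of zero. A runnable decoding sketch (stub handlers and names are illustrative):

#include <stdint.h>
#include <stdio.h>

static void handle_global(uint16_t part) { printf("global: %u\n", part); }
static void handle_vcpu(uint16_t part, uint16_t vcpu)
{
	printf("part %u, vcpu %u\n", part, vcpu);
}

static void process_list(const uint16_t *ids, unsigned int ids_count)
{
	uint16_t part_id = ids[0];	/* first entry is the partition ID */

	if (ids_count == 1) {		/* only the partition ID: global */
		handle_global(part_id);
		return;
	}

	for (unsigned int i = 1; i < ids_count; i++)
		handle_vcpu(part_id, ids[i]);	/* per-vCPU entries follow */
}

int main(void)
{
	uint16_t global[] = { 7 };
	uint16_t per_vcpu[] = { 7, 0, 1 };

	process_list(global, 1);
	process_list(per_vcpu, 3);
	return 0;
}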
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
index ea9201e7044c..1fa79bba492e 100644
--- a/drivers/firmware/arm_scmi/powercap.c
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -736,7 +736,7 @@ static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_PAI_GET, 4, domain,
&fc[POWERCAP_FC_PAI].get_addr, NULL,
- &fc[POWERCAP_PAI_GET].rate_limit);
+ &fc[POWERCAP_FC_PAI].rate_limit);
*p_fc = fc;
}
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 350573518503..130d13e9cd6b 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
rd->raw = raw;
filp->private_data = rd;
- return 0;
+ return nonseekable_open(inode, filp);
}
static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
@@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.write = scmi_dbg_raw_mode_reset_write,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_async_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_notif_read,
.poll = scmi_test_dbg_raw_mode_notif_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_errors_read,
.poll = scmi_test_dbg_raw_mode_errors_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
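The raw-mode debugfs interfaces are message streams with no meaningful file position, so the patch pairs nonseekable_open() at open time with no_llseek in every file_operations. A minimal sketch of the pairing (illustrative ops, not the SCMI code):

#include <linux/fs.h>
#include <linux/module.h>

static int demo_open(struct inode *inode, struct file *filp)
{
	/* driver state setup would go here */
	return nonseekable_open(inode, filp);	/* marks the file non-seekable */
}

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.open	= demo_open,
	.llseek	= no_llseek,	/* reject lseek() outright */
};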
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 7e1852859550..c41e7b2091cd 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -120,7 +120,7 @@ efi_status_t efi_random_alloc(unsigned long size,
continue;
}
- target = round_up(max(md->phys_addr, alloc_min), align) + target_slot * align;
+ target = round_up(max_t(u64, md->phys_addr, alloc_min), align) + target_slot * align;
pages = size / EFI_PAGE_SIZE;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
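The max() → max_t() swap is a type fix: md->phys_addr is a u64 while alloc_min may be a 32-bit unsigned long, and the kernel's max() refuses operands of different types at compile time. max_t() casts both sides to the named type first. A sketch:

#include <linux/minmax.h>
#include <linux/types.h>

static u64 lowest_candidate(u64 phys_addr, unsigned long alloc_min)
{
	/* max(phys_addr, alloc_min) would not compile on 32-bit targets */
	return max_t(u64, phys_addr, alloc_min);
}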
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 6a6ffc6707bd..d5a8182cf2e1 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -496,6 +496,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->vid_mode = 0xffff;
hdr->type_of_loader = 0x21;
+ hdr->initrd_addr_max = INT_MAX;
/* Convert unicode cmdline to ascii */
cmdline_ptr = efi_convert_cmdline(image, &options_size);
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 32188f098ef3..bc550ad0dbe0 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -221,6 +221,19 @@ struct qsee_rsp_uefi_query_variable_info {
* alignment of 8 bytes (64 bits) for GUIDs. Our definition of efi_guid_t,
* however, has an alignment of 4 bytes (32 bits). So far, this seems to work
* fine here. See also the comment on the typedef of efi_guid_t.
+ *
+ * Note: It looks like uefisecapp is quite picky about how the memory passed to
+ * it is structured and aligned. This is particularly true for the
+ * request/response setup used for QSEE_CMD_UEFI_GET_VARIABLE. While
+ * qcom_qseecom_app_send() in theory accepts separate buffers/addresses for the
+ * request and response parts, in practice it seems to expect both to be part
+ * of a larger contiguous block. We initially allocated separate buffers for
+ * the request and response, but this caused the QSEE_CMD_UEFI_GET_VARIABLE
+ * command to either not write any response to the response buffer or outright
+ * crash the device. Therefore, we now allocate a single contiguous block of
+ * DMA memory for both and properly align the data using the macros below. In
+ * particular, request and response structs are aligned at 8 bytes (via
+ * __reqdata_offs()), following the driver that this has been reverse-engineered from.
*/
#define qcuefi_buf_align_fields(fields...) \
({ \
@@ -244,6 +257,12 @@ struct qsee_rsp_uefi_query_variable_info {
#define __array_offs(type, count, offset) \
__field_impl(sizeof(type) * (count), __alignof__(type), offset)
+#define __array_offs_aligned(type, count, align, offset) \
+ __field_impl(sizeof(type) * (count), align, offset)
+
+#define __reqdata_offs(size, offset) \
+ __array_offs_aligned(u8, size, 8, offset)
+
#define __array(type, count) __array_offs(type, count, NULL)
#define __field_offs(type, offset) __array_offs(type, 1, offset)
#define __field(type) __array_offs(type, 1, NULL)
@@ -277,10 +296,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
unsigned long buffer_size = *data_size;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -304,17 +328,19 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
__array(u8, buffer_size)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_VARIABLE;
req_data->data_size = buffer_size;
@@ -332,7 +358,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -407,9 +435,7 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size);
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -422,10 +448,15 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
struct qsee_rsp_uefi_set_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t name_offs;
size_t guid_offs;
size_t data_offs;
size_t req_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -450,17 +481,19 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
__array_offs(u8, data_size, &data_offs)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_SET_VARIABLE;
req_data->attributes = attributes;
@@ -483,8 +516,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
if (data_size)
memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -507,9 +541,7 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -521,10 +553,15 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_get_next_variable *req_data;
struct qsee_rsp_uefi_get_next_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name_size || !name || !guid)
@@ -545,17 +582,19 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
__array(*name, *name_size / sizeof(*name))
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_NEXT_VARIABLE;
req_data->guid_offset = guid_offs;
@@ -572,7 +611,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
goto out_free;
}
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -645,9 +686,7 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -659,26 +698,34 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_query_variable_info *req_data;
struct qsee_rsp_uefi_query_variable_info *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
+ size_t req_offs;
+ size_t rsp_offs;
int status;
- req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(sizeof(*req_data), &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_QUERY_VARIABLE_INFO;
req_data->attributes = attr;
req_data->length = sizeof(*req_data);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, sizeof(*req_data), rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, sizeof(*req_data),
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -711,9 +758,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
*max_variable_size = rsp_data->max_variable_size;
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
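All four uefisecapp call sites now follow the same packing recipe: compute 8-byte-aligned offsets for the request and response inside one block, allocate it once with qseecom_dma_alloc(), fill the structs through the CPU pointer, and hand the device the DMA address plus the same offsets. A hedged sketch of the layout step (the helper's name and shape are illustrative):

#include <linux/align.h>
#include <linux/gfp.h>
#include <linux/firmware/qcom/qcom_qseecom.h>

static void *demo_pack_cmd_buf(struct qseecom_client *client,
			       size_t req_size, size_t rsp_size,
			       dma_addr_t *dma, size_t *rsp_offs)
{
	size_t total;

	*rsp_offs = ALIGN(req_size, 8);	/* response follows the request */
	total = *rsp_offs + rsp_size;	/* one contiguous DMA block */

	return qseecom_dma_alloc(client, total, dma, GFP_KERNEL);
	/* request lives at buf + 0 / *dma + 0,
	 * response at buf + *rsp_offs / *dma + *rsp_offs */
}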
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 520de9b5633a..90283f160a22 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1576,9 +1576,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
* qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @app_id: The ID of the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given ID and reads back
@@ -1589,33 +1589,13 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
*
* Return: Zero on success, nonzero on failure.
*/
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size)
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
struct qcom_scm_qseecom_resp res = {};
struct qcom_scm_desc desc = {};
- dma_addr_t req_phys;
- dma_addr_t rsp_phys;
int status;
- /* Map request buffer */
- req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
- status = dma_mapping_error(__scm->dev, req_phys);
- if (status) {
- dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
- return status;
- }
-
- /* Map response buffer */
- rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
- status = dma_mapping_error(__scm->dev, rsp_phys);
- if (status) {
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
- dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
- return status;
- }
-
- /* Set up SCM call data */
desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
@@ -1623,18 +1603,13 @@ int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL);
desc.args[0] = app_id;
- desc.args[1] = req_phys;
+ desc.args[1] = req;
desc.args[2] = req_size;
- desc.args[3] = rsp_phys;
+ desc.args[3] = rsp;
desc.args[4] = rsp_size;
- /* Perform call */
status = qcom_scm_qseecom_call(&desc, &res);
- /* Unmap buffers */
- dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
-
if (status)
return status;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 1ee62cd58582..25db014494a4 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
case 0x5e:
return GPIOPANELCTL;
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
}
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 5ef8af824980..c097e310c9e8 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -529,6 +529,7 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = {
{ .compatible = "nxp,lpc3220-gpio", },
{ },
};
+MODULE_DEVICE_TABLE(of, lpc32xx_gpio_of_match);
static struct platform_driver lpc32xx_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index b75e0b12087a..4b29abafecf6 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -195,7 +195,8 @@ static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
static void tng_irq_ack(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
void __iomem *gisr;
u8 shift;
@@ -227,7 +228,8 @@ static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
static void tng_irq_mask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
tng_irq_unmask_mask(priv, gpio, false);
@@ -236,7 +238,8 @@ static void tng_irq_mask(struct irq_data *d)
static void tng_irq_unmask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
gpiochip_enable_irq(&priv->chip, gpio);
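The three tangier hunks fix the same mismatch: for GPIO irqchips the irq chip data is the struct gpio_chip itself, so the driver's private state must be fetched with gpiochip_get_data() rather than cast straight from the irq data. The fixed lookup, as a sketch:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

struct tng_gpio;	/* the driver's private state */

static struct tng_gpio *demo_irq_to_priv(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	return gpiochip_get_data(gc);	/* not (struct tng_gpio *)chip data */
}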
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index d87dd06db40d..9130c691a2dd 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -36,12 +36,6 @@
#define TEGRA186_GPIO_SCR_SEC_REN BIT(27)
#define TEGRA186_GPIO_SCR_SEC_G1W BIT(9)
#define TEGRA186_GPIO_SCR_SEC_G1R BIT(1)
-#define TEGRA186_GPIO_FULL_ACCESS (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN | \
- TEGRA186_GPIO_SCR_SEC_G1R | \
- TEGRA186_GPIO_SCR_SEC_G1W)
-#define TEGRA186_GPIO_SCR_SEC_ENABLE (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN)
/* control registers */
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
@@ -177,10 +171,18 @@ static inline bool tegra186_gpio_is_accessible(struct tegra_gpio *gpio, unsigned
value = __raw_readl(secure + TEGRA186_GPIO_SCR);
- if ((value & TEGRA186_GPIO_SCR_SEC_ENABLE) == 0)
- return true;
+ /*
+ * When SCR_SEC_[R|W]EN is unset, we have full read/write access to all the
+ * registers for the given GPIO pin.
+ * When SCR_SEC_[R|W]EN is set, we must also check the accompanying
+ * SCR_SEC_G1[R|W] bit to determine read/write access to all the registers
+ * for the given GPIO pin.
+ */
- if ((value & TEGRA186_GPIO_FULL_ACCESS) == TEGRA186_GPIO_FULL_ACCESS)
+ if (((value & TEGRA186_GPIO_SCR_SEC_REN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_REN) && (value & TEGRA186_GPIO_SCR_SEC_G1R))) &&
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) && (value & TEGRA186_GPIO_SCR_SEC_G1W))))
return true;
return false;
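The replacement condition is two implications: reads are allowed when SCR_SEC_REN is clear or G1R is set, and writes when SCR_SEC_WEN is clear or G1W is set. The (value & ..._REN) && term in the second disjunct is redundant once the first disjunct has failed, so the check can be read as this equivalent sketch (using the SCR bit macros from the file):

#include <linux/types.h>

static bool demo_scr_allows_access(u32 value)
{
	bool read_ok  = !(value & TEGRA186_GPIO_SCR_SEC_REN) ||
			(value & TEGRA186_GPIO_SCR_SEC_G1R);
	bool write_ok = !(value & TEGRA186_GPIO_SCR_SEC_WEN) ||
			(value & TEGRA186_GPIO_SCR_SEC_G1W);

	return read_ok && write_ok;
}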
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index c18b6b47384f..94ca9d03c094 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type)
unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;
if (gpio >= WCOVE_GPIO_NUM)
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
return reg + gpio;
}
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f384fa278764..d09c7d728365 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -728,6 +728,25 @@ static u32 line_event_id(int level)
GPIO_V2_LINE_EVENT_FALLING_EDGE;
}
+static inline char *make_irq_label(const char *orig)
+{
+ char *new;
+
+ if (!orig)
+ return NULL;
+
+ new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ return new;
+}
+
+static inline void free_irq_label(const char *label)
+{
+ kfree(label);
+}
+
#ifdef CONFIG_HTE
static enum hte_return process_hw_ts_thread(void *p)
@@ -1015,6 +1034,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
unsigned long irqflags;
int ret, level, irq;
+ char *label;
/* try hardware */
ret = gpiod_set_debounce(line->desc, debounce_period_us);
@@ -1037,11 +1057,17 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
if (irq < 0)
return -ENXIO;
+ label = make_irq_label(line->req->label);
+ if (IS_ERR(label))
+ return -ENOMEM;
+
irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
ret = request_irq(irq, debounce_irq_handler, irqflags,
- line->req->label, line);
- if (ret)
+ label, line);
+ if (ret) {
+ free_irq_label(label);
return ret;
+ }
line->irq = irq;
} else {
ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
@@ -1086,7 +1112,7 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
static void edge_detector_stop(struct line *line)
{
if (line->irq) {
- free_irq(line->irq, line);
+ free_irq_label(free_irq(line->irq, line));
line->irq = 0;
}
@@ -1110,6 +1136,7 @@ static int edge_detector_setup(struct line *line,
unsigned long irqflags = 0;
u64 eflags;
int irq, ret;
+ char *label;
eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (eflags && !kfifo_initialized(&line->req->events)) {
@@ -1146,11 +1173,17 @@ static int edge_detector_setup(struct line *line,
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
+ label = make_irq_label(line->req->label);
+ if (IS_ERR(label))
+ return PTR_ERR(label);
+
/* Request a thread to read the events */
ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
- irqflags, line->req->label, line);
- if (ret)
+ irqflags, label, line);
+ if (ret) {
+ free_irq_label(label);
return ret;
+ }
line->irq = irq;
return 0;
@@ -1973,7 +2006,7 @@ static void lineevent_free(struct lineevent_state *le)
blocking_notifier_chain_unregister(&le->gdev->device_notifier,
&le->device_unregistered_nb);
if (le->irq)
- free_irq(le->irq, le);
+ free_irq_label(free_irq(le->irq, le));
if (le->desc)
gpiod_free(le->desc);
kfree(le->label);
@@ -2114,6 +2147,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
int fd;
int ret;
int irq, irqflags = 0;
+ char *label;
if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
return -EFAULT;
@@ -2198,15 +2232,23 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
+ label = make_irq_label(le->label);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
+ goto out_free_le;
+ }
+
/* Request a thread to read the events */
ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
- le->label,
+ label,
le);
- if (ret)
+ if (ret) {
+ free_irq_label(label);
goto out_free_le;
+ }
le->irq = irq;
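The reason for make_irq_label(): request_irq() uses the name verbatim to create a procfs directory under /proc/irq/<n>/, so a user-supplied consumer label containing '/' would fail there. The label is therefore duplicated with '/' replaced, and, because request_irq() keeps only the pointer, the duplicate must be freed after free_irq(). The core of it, as a sketch:

#include <linux/gfp.h>
#include <linux/string_helpers.h>

static char *demo_sanitize_irq_label(const char *orig)
{
	/* duplicate with every '/' replaced; NULL on allocation failure */
	return kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
}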
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index ce94e37bcbee..94903fc1c145 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1175,6 +1175,9 @@ struct gpio_device *gpio_device_find(const void *data,
list_for_each_entry_srcu(gdev, &gpio_devices, list,
srcu_read_lock_held(&gpio_devices_srcu)) {
+ if (!device_is_registered(&gdev->dev))
+ continue;
+
guard(srcu)(&gdev->srcu);
gc = srcu_dereference(gdev->chip, &gdev->srcu);
@@ -2397,6 +2400,11 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_dup_line_label);
+static inline const char *function_name_or_default(const char *con_id)
+{
+ return con_id ?: "(default)";
+}
+
/**
* gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
* @gc: GPIO chip
@@ -2425,10 +2433,11 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags)
{
struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum);
+ const char *name = function_name_or_default(label);
int ret;
if (IS_ERR(desc)) {
- chip_err(gc, "failed to get GPIO descriptor\n");
+ chip_err(gc, "failed to get GPIO %s descriptor\n", name);
return desc;
}
@@ -2438,8 +2447,8 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret) {
- chip_err(gc, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
+ chip_err(gc, "setup of own GPIO %s failed\n", name);
return ERR_PTR(ret);
}
@@ -4153,19 +4162,17 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
enum gpiod_flags *flags,
unsigned long *lookupflags)
{
+ const char *name = function_name_or_default(con_id);
struct gpio_desc *desc = ERR_PTR(-ENOENT);
if (is_of_node(fwnode)) {
- dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = of_find_gpio(to_of_node(fwnode), con_id, idx, lookupflags);
} else if (is_acpi_node(fwnode)) {
- dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = acpi_find_gpio(fwnode, con_id, idx, flags, lookupflags);
} else if (is_software_node(fwnode)) {
- dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = swnode_find_gpio(fwnode, con_id, idx, lookupflags);
}
@@ -4181,6 +4188,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
bool platform_lookup_allowed)
{
unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ const char *name = function_name_or_default(con_id);
/*
* scoped_guard() is implemented as a for loop, meaning static
* analyzers will complain about these two not being initialized.
@@ -4203,8 +4211,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
}
if (IS_ERR(desc)) {
- dev_dbg(consumer, "No GPIO consumer %s found\n",
- con_id);
+ dev_dbg(consumer, "No GPIO consumer %s found\n", name);
return desc;
}
@@ -4226,15 +4233,14 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
*
* FIXME: Make this more sane and safe.
*/
- dev_info(consumer,
- "nonexclusive access to GPIO for %s\n", con_id);
+ dev_info(consumer, "nonexclusive access to GPIO for %s\n", name);
return desc;
}
ret = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (ret < 0) {
- dev_dbg(consumer, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
+ dev_dbg(consumer, "setup of GPIO %s failed\n", name);
return ERR_PTR(ret);
}
@@ -4350,6 +4356,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags)
{
+ const char *name = function_name_or_default(con_id);
int ret;
if (lflags & GPIO_ACTIVE_LOW)
@@ -4393,7 +4400,7 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
- gpiod_dbg(desc, "no flags found for %s\n", con_id);
+ gpiod_dbg(desc, "no flags found for GPIO %s\n", name);
return 0;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 5a0c476361c3..959b19a04101 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -74,10 +74,12 @@ config DRM_KUNIT_TEST_HELPERS
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT && MMU
+ depends on DRM
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on KUNIT
+ depends on MMU
select DRM_BUDDY
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
select DRM_GEM_SHMEM_HELPER
@@ -102,6 +104,38 @@ config DRM_KMS_HELPER
help
CRTC helpers for KMS drivers.
+config DRM_PANIC
+ bool "Display a user-friendly message when a kernel panic occurs"
+ depends on DRM && !FRAMEBUFFER_CONSOLE
+ select DRM_KMS_HELPER
+ select FONT_SUPPORT
+ help
+ Enable a drm panic handler, which will display a user-friendly message
+ when a kernel panic occurs. It's useful when using a user-space
+ console instead of fbcon.
+ It will only work if your graphics driver supports this feature.
+ To support Hi-DPI displays, you can enable bigger fonts like
+ FONT_TER16x32.
+
+config DRM_PANIC_FOREGROUND_COLOR
+ hex "Drm panic screen foreground color, in RGB"
+ depends on DRM_PANIC
+ default 0xffffff
+
+config DRM_PANIC_BACKGROUND_COLOR
+ hex "Drm panic screen background color, in RGB"
+ depends on DRM_PANIC
+ default 0x000000
+
+config DRM_PANIC_DEBUG
+ bool "Add a debug fs entry to trigger drm_panic"
+ depends on DRM_PANIC && DEBUG_FS
+ help
+ Add dri/[device]/drm_panic_plane_x in the kernel debugfs, to force the
+ panic handler to write the panic message to this plane's scanout buffer.
+ This is unsafe and should not be enabled on a production build.
+ If in doubt, say "N".
+
config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
@@ -371,6 +405,8 @@ source "drivers/gpu/drm/lima/Kconfig"
source "drivers/gpu/drm/panfrost/Kconfig"
+source "drivers/gpu/drm/panthor/Kconfig"
+
source "drivers/gpu/drm/aspeed/Kconfig"
source "drivers/gpu/drm/mcde/Kconfig"
@@ -414,3 +450,16 @@ config DRM_LIB_RANDOM
config DRM_PRIVACY_SCREEN
bool
default n
+
+config DRM_WERROR
+ bool "Compile the drm subsystem with warnings as errors"
+ depends on DRM && EXPERT
+ default n
+ help
+ A kernel build should not cause any compiler warnings, and this
+ enables the '-Werror' flag to enforce that rule in the drm subsystem.
+
+ The drm subsystem enables more warnings than the kernel default, so
+ this config option is disabled by default.
+
+ If in doubt, say N.
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 104b42df2e95..f9ca4f8fa6c5 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,6 +5,34 @@
CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
+# Unconditionally enable W=1 warnings locally
+# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn
+subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
+subdir-ccflags-y += $(call cc-option, -Wrestrict)
+subdir-ccflags-y += -Wmissing-format-attribute
+subdir-ccflags-y += -Wold-style-definition
+subdir-ccflags-y += -Wmissing-include-dirs
+subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
+subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
+subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
+subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
+# FIXME: fix -Wformat-truncation warnings and uncomment
+#subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
+subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
+# The following turn off the warnings enabled by -Wextra
+ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
+subdir-ccflags-y += -Wno-missing-field-initializers
+subdir-ccflags-y += -Wno-type-limits
+subdir-ccflags-y += -Wno-shift-negative-value
+endif
+ifeq ($(findstring 3, $(KBUILD_EXTRA_WARN)),)
+subdir-ccflags-y += -Wno-sign-compare
+endif
+# --- end copy-paste
+
+# Enable -Werror in CI and development
+subdir-ccflags-$(CONFIG_DRM_WERROR) += -Werror
+
drm-y := \
drm_aperture.o \
drm_atomic.o \
@@ -60,6 +88,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen.o \
drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
+drm-$(CONFIG_DRM_PANIC) += drm_panic.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
@@ -179,6 +208,7 @@ obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
+obj-$(CONFIG_DRM_PANTHOR) += panthor/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 22d88f8ef527..b0365cc1374e 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -2,13 +2,15 @@
config DRM_AMDGPU
tristate "AMD GPU"
- depends on DRM && PCI && MMU
+ depends on DRM
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HDCP_HELPER
+ depends on DRM_DISPLAY_HDMI_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on MMU
+ depends on PCI
depends on !UML
select FW_LOADER
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_HELPER
- select DRM_DISPLAY_HDCP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SCHED
select DRM_TTM
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 4536c8ad0e11..1f6b56ec99f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -70,7 +70,8 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o \
atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
- amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_ib.o amdgpu_pll.o \
+ amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_vm_tlb_fence.o \
+ amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
@@ -80,7 +81,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
- amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o
+ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -247,7 +248,8 @@ amdgpu-y += \
smuio_v11_0_6.o \
smuio_v13_0.o \
smuio_v13_0_3.o \
- smuio_v13_0_6.o
+ smuio_v13_0_6.o \
+ smuio_v14_0_2.o
# add reset block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 576067d66bb9..d0a8da67dc2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -97,7 +97,7 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
- return r;
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9c62552bec34..f87d53e183c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -139,6 +139,14 @@ enum amdgpu_ss {
AMDGPU_SS_DRV_UNLOAD
};
+struct amdgpu_hwip_reg_entry {
+ u32 hwip;
+ u32 inst;
+ u32 seg;
+ u32 reg_offset;
+ const char *reg_name;
+};
+
struct amdgpu_watchdog_timer {
bool timeout_fatal_disable;
uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
@@ -210,6 +218,7 @@ extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
+extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
@@ -493,6 +502,7 @@ struct amdgpu_wb {
uint64_t gpu_addr;
u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+ spinlock_t lock;
};
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
@@ -605,7 +615,7 @@ struct amdgpu_asic_funcs {
/* PCIe replay counter */
uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
/* device supports BACO */
- bool (*supports_baco)(struct amdgpu_device *adev);
+ int (*supports_baco)(struct amdgpu_device *adev);
/* pre asic_init quirks */
void (*pre_asic_init)(struct amdgpu_device *adev);
/* enter/exit umd stable pstate */
@@ -1407,7 +1417,8 @@ bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-bool amdgpu_device_supports_baco(struct drm_device *dev);
+int amdgpu_device_supports_baco(struct drm_device *dev);
+void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_device_baco_enter(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 493982f94649..c50202215f6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -28,7 +28,7 @@
#define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype}
-typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data);
+typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);
struct aca_banks {
int nr_banks;
@@ -86,7 +86,7 @@ static void aca_banks_release(struct aca_banks *banks)
}
}
-static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count)
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count)
{
struct amdgpu_aca *aca = &adev->aca;
const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
@@ -116,20 +116,22 @@ static struct aca_regs_dump {
{"CONTROL_MASK", ACA_REG_IDX_CTL_MASK},
};
-static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank)
+static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank,
+ struct ras_query_context *qctx)
{
+ u64 event_id = qctx ? qctx->event_id : 0ULL;
int i;
- dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
/* plus 1 for output format, e.g.: ACA[08/08]: xxxx */
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
- dev_info(adev->dev, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
- idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+ idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
}
-static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_error_type type,
+static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
int start, int count,
- struct aca_banks *banks)
+ struct aca_banks *banks, struct ras_query_context *qctx)
{
struct amdgpu_aca *aca = &adev->aca;
const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
@@ -143,13 +145,12 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_erro
return -EOPNOTSUPP;
switch (type) {
- case ACA_ERROR_TYPE_UE:
+ case ACA_SMU_TYPE_UE:
max_count = smu_funcs->max_ue_bank_count;
break;
- case ACA_ERROR_TYPE_CE:
+ case ACA_SMU_TYPE_CE:
max_count = smu_funcs->max_ce_bank_count;
break;
- case ACA_ERROR_TYPE_DEFERRED:
default:
return -EINVAL;
}
@@ -164,7 +165,9 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_erro
if (ret)
return ret;
- aca_smu_bank_dump(adev, i, count, &bank);
+ bank.type = type;
+
+ aca_smu_bank_dump(adev, i, count, &bank, qctx);
ret = aca_banks_add_bank(banks, &bank);
if (ret)
@@ -195,7 +198,7 @@ static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type t
return hwip->hwid == hwid && hwip->mcatype == mcatype;
}
-static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type)
+static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
@@ -273,59 +276,49 @@ static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_
return new_bank_error(aerr, info);
}
-static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type,
- struct aca_bank_report *report)
+int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info,
+ enum aca_error_type type, u64 count)
{
struct aca_error_cache *error_cache = &handle->error_cache;
struct aca_bank_error *bank_error;
struct aca_error *aerr;
- if (!handle || !report)
+ if (!handle || !info || type >= ACA_ERROR_TYPE_COUNT)
return -EINVAL;
- if (!report->count[type])
+ if (!count)
return 0;
aerr = &error_cache->errors[type];
- bank_error = get_bank_error(aerr, &report->info);
+ bank_error = get_bank_error(aerr, info);
if (!bank_error)
return -ENOMEM;
- bank_error->count[type] += report->count[type];
+ bank_error->count += count;
return 0;
}
-static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, struct aca_bank_report *report)
+static int aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
- if (!bank || !report)
+ if (!bank)
return -EINVAL;
- if (!bank_ops->aca_bank_generate_report)
+ if (!bank_ops->aca_bank_parser)
return -EOPNOTSUPP;
- memset(report, 0, sizeof(*report));
- return bank_ops->aca_bank_generate_report(handle, bank, type,
- report, handle->data);
+ return bank_ops->aca_bank_parser(handle, bank, type,
+ handle->data);
}
static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
- struct aca_bank_report report;
int ret;
- ret = aca_generate_bank_report(handle, bank, type, &report);
- if (ret)
- return ret;
-
- if (!report.count[type])
- return 0;
-
- ret = aca_log_errors(handle, type, &report);
+ ret = aca_bank_parser(handle, bank, type);
if (ret)
return ret;
@@ -333,7 +326,7 @@ static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank
}
static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank,
- enum aca_error_type type, bank_handler_t handler, void *data)
+ enum aca_smu_type type, bank_handler_t handler, void *data)
{
struct aca_handle *handle;
int ret;
@@ -354,7 +347,7 @@ static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *ba
}
static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks,
- enum aca_error_type type, bank_handler_t handler, void *data)
+ enum aca_smu_type type, bank_handler_t handler, void *data)
{
struct aca_bank_node *node;
struct aca_bank *bank;
@@ -378,8 +371,28 @@ static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *
return 0;
}
-static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type,
- bank_handler_t handler, void *data)
+static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type type)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ bool ret = true;
+
+ /*
+ * Because the UE Valid MCA count will only be cleared after reset,
+ * in order to avoid repeated counting of the error count,
+ * the aca bank is only updated once during the gpu recovery stage.
+ */
+ if (type == ACA_SMU_TYPE_UE) {
+ if (amdgpu_ras_intr_triggered())
+ ret = atomic_cmpxchg(&aca->ue_update_flag, 0, 1) == 0;
+ else
+ atomic_set(&aca->ue_update_flag, 0);
+ }
+
+ return ret;
+}
+
+static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
+ bank_handler_t handler, struct ras_query_context *qctx, void *data)
{
struct amdgpu_aca *aca = &adev->aca;
struct aca_banks banks;
@@ -389,9 +402,8 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type
if (list_empty(&aca->mgr.list))
return 0;
- /* NOTE: pmfw is only support UE and CE */
- if (type == ACA_ERROR_TYPE_DEFERRED)
- type = ACA_ERROR_TYPE_CE;
+ if (!aca_bank_should_update(adev, type))
+ return 0;
ret = aca_smu_get_valid_aca_count(adev, type, &count);
if (ret)
@@ -402,7 +414,7 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type
aca_banks_init(&banks);
- ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks);
+ ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks, qctx);
if (ret)
goto err_release_banks;
@@ -431,7 +443,7 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er
if (type >= ACA_ERROR_TYPE_COUNT)
return -EINVAL;
- count = bank_error->count[type];
+ count = bank_error->count;
if (!count)
return 0;
@@ -447,6 +459,8 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er
amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count);
break;
case ACA_ERROR_TYPE_DEFERRED:
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, count);
+ break;
default:
break;
}
@@ -477,12 +491,25 @@ out_unlock:
}
static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type,
- struct ras_err_data *err_data)
+ struct ras_err_data *err_data, struct ras_query_context *qctx)
{
+ enum aca_smu_type smu_type;
int ret;
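+ /*
+ * Map the aca error type onto the SMU bank type; the SMU reports
+ * deferred errors through its CE banks.
+ */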
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ smu_type = ACA_SMU_TYPE_UE;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ case ACA_ERROR_TYPE_DEFERRED:
+ smu_type = ACA_SMU_TYPE_CE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* update aca bank to aca source error_cache first */
- ret = aca_banks_update(adev, type, handler_aca_log_bank_error, NULL);
+ ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL);
if (ret)
return ret;
@@ -498,10 +525,9 @@ static bool aca_handle_is_valid(struct aca_handle *handle)
}
int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
- enum aca_error_type type, void *data)
+ enum aca_error_type type, struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
{
- struct ras_err_data *err_data = (struct ras_err_data *)data;
-
if (!handle || !err_data)
return -EINVAL;
@@ -511,7 +537,7 @@ int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *han
if (!(BIT(type) & handle->mask))
return 0;
- return __aca_get_error_data(adev, handle, type, err_data);
+ return __aca_get_error_data(adev, handle, type, err_data, qctx);
}
static void aca_error_init(struct aca_error *aerr, enum aca_error_type type)
@@ -668,6 +694,8 @@ int amdgpu_aca_init(struct amdgpu_device *adev)
struct amdgpu_aca *aca = &adev->aca;
int ret;
+ atomic_set(&aca->ue_update_flag, 0);
+
ret = aca_manager_init(&aca->mgr);
if (ret)
return ret;
@@ -680,6 +708,8 @@ void amdgpu_aca_fini(struct amdgpu_device *adev)
struct amdgpu_aca *aca = &adev->aca;
aca_manager_fini(&aca->mgr);
+
+ atomic_set(&aca->ue_update_flag, 0);
}
int amdgpu_aca_reset(struct amdgpu_device *adev)
@@ -723,23 +753,13 @@ int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info)
static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
- int error_code;
-
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) {
- error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
- return error_code & 0xff;
- }
- break;
- default:
- break;
- }
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
- /* NOTE: the true error code is encoded in status.errorcode[0:7] */
- error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
+ if (!smu_funcs || !smu_funcs->parse_error_code)
+ return -EOPNOTSUPP;
- return error_code & 0xff;
+ return smu_funcs->parse_error_code(adev, bank);
}
int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size)
@@ -750,6 +770,9 @@ int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank
return -EINVAL;
error_code = aca_bank_get_error_code(adev, bank);
+ if (error_code < 0)
+ return error_code;
+
for (i = 0; i < size; i++) {
if (err_codes[i] == error_code)
return 0;
@@ -784,7 +807,7 @@ static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val)
return 0;
}
-static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_error_type type, int idx)
+static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_smu_type type, int idx)
{
struct aca_bank_info info;
int i, ret;
@@ -793,7 +816,7 @@ static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_e
if (ret)
return;
- seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_ERROR_TYPE_UE ? "UE" : "CE");
+ seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_SMU_TYPE_UE ? "UE" : "CE");
seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
idx, info.socket_id, info.die_id, info.hwid, info.mcatype);
@@ -807,7 +830,7 @@ struct aca_dump_context {
};
static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
struct aca_dump_context *ctx = (struct aca_dump_context *)data;
@@ -816,7 +839,7 @@ static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *ban
return handler_aca_log_bank_error(handle, bank, type, NULL);
}
-static int aca_dump_show(struct seq_file *m, enum aca_error_type type)
+static int aca_dump_show(struct seq_file *m, enum aca_smu_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
struct aca_dump_context context = {
@@ -824,12 +847,12 @@ static int aca_dump_show(struct seq_file *m, enum aca_error_type type)
.idx = 0,
};
- return aca_banks_update(adev, type, handler_aca_bank_dump, (void *)&context);
+ return aca_banks_update(adev, type, handler_aca_bank_dump, NULL, (void *)&context);
}
static int aca_dump_ce_show(struct seq_file *m, void *unused)
{
- return aca_dump_show(m, ACA_ERROR_TYPE_CE);
+ return aca_dump_show(m, ACA_SMU_TYPE_CE);
}
static int aca_dump_ce_open(struct inode *inode, struct file *file)
@@ -847,7 +870,7 @@ static const struct file_operations aca_ce_dump_debug_fops = {
static int aca_dump_ue_show(struct seq_file *m, void *unused)
{
- return aca_dump_show(m, ACA_ERROR_TYPE_UE);
+ return aca_dump_show(m, ACA_SMU_TYPE_UE);
}
static int aca_dump_ue_open(struct inode *inode, struct file *file)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index 2da50e095883..5ef6b745f222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -26,6 +26,9 @@
#include <linux/list.h>
+struct ras_err_data;
+struct ras_query_context;
+
#define ACA_MAX_REGS_COUNT (16)
#define ACA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
@@ -99,7 +102,14 @@ enum aca_error_type {
ACA_ERROR_TYPE_COUNT
};
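+/* Bank types as reported by the SMU firmware, which only distinguishes UE and CE */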
+enum aca_smu_type {
+ ACA_SMU_TYPE_UE = 0,
+ ACA_SMU_TYPE_CE,
+ ACA_SMU_TYPE_COUNT,
+};
+
struct aca_bank {
+ enum aca_smu_type type;
u64 regs[ACA_MAX_REGS_COUNT];
};
@@ -115,15 +125,10 @@ struct aca_bank_info {
int mcatype;
};
-struct aca_bank_report {
- struct aca_bank_info info;
- u64 count[ACA_ERROR_TYPE_COUNT];
-};
-
struct aca_bank_error {
struct list_head node;
struct aca_bank_info info;
- u64 count[ACA_ERROR_TYPE_COUNT];
+ u64 count;
};
struct aca_error {
@@ -157,9 +162,8 @@ struct aca_handle {
};
struct aca_bank_ops {
- int (*aca_bank_generate_report)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data);
- bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ int (*aca_bank_parser)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);
+ bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type,
void *data);
};
@@ -167,13 +171,15 @@ struct aca_smu_funcs {
int max_ue_bank_count;
int max_ce_bank_count;
int (*set_debug_mode)(struct amdgpu_device *adev, bool enable);
- int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_error_type type, u32 *count);
- int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_error_type type, int idx, struct aca_bank *bank);
+ int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count);
+ int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_smu_type type, int idx, struct aca_bank *bank);
+ int (*parse_error_code)(struct amdgpu_device *adev, struct aca_bank *bank);
};
struct amdgpu_aca {
struct aca_handle_manager mgr;
const struct aca_smu_funcs *smu_funcs;
+ atomic_t ue_update_flag;
bool is_enabled;
};
@@ -196,7 +202,10 @@ int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
const char *name, const struct aca_info *aca_info, void *data);
void amdgpu_aca_remove_handle(struct aca_handle *handle);
int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
- enum aca_error_type type, void *data);
+ enum aca_error_type type, struct ras_err_data *err_data,
+ struct ras_query_context *qctx);
int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en);
void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root);
+int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info,
+ enum aca_error_type type, u64 count);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 6d72355ac492..bf6c4a0d0525 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -637,6 +637,8 @@ static const struct amd_ip_funcs acp_ip_funcs = {
.soft_reset = acp_soft_reset,
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version acp_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 35dd6effa9a3..7ba05f030dd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -747,10 +747,17 @@ bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev)
return amdgpu_ras_get_fed_status(adev);
}
+void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset)
+{
+ amdgpu_umc_pasid_poison_handler(adev, block, pasid, pasid_fn, data, reset);
+}
+
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset)
+ enum amdgpu_ras_block block, uint32_t reset)
{
- amdgpu_umc_poison_handler(adev, block, reset);
+ amdgpu_umc_pasid_poison_handler(adev, block, 0, NULL, NULL, reset);
}
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
@@ -769,12 +776,20 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
return 0;
}
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
+bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int hub_inst, int hub_type)
{
- if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status)
- return adev->gfx.ras->query_utcl2_poison_status(adev);
- else
- return false;
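+ /* hub_type 0 selects the gfxhub, any other value the mmhub */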
+ if (!hub_type) {
+ if (adev->gfxhub.funcs->query_utcl2_poison_status)
+ return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst);
+ else
+ return false;
+ } else {
+ if (adev->mmhub.funcs->query_utcl2_poison_status)
+ return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst);
+ else
+ return false;
+ }
}
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 0ef223c2affb..1de021ebdd46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -336,12 +336,18 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset);
+ enum amdgpu_ras_block block, uint32_t reset);
+
+void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset);
+
bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
+bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int hub_inst, int hub_type);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 69810b3f1c63..3ab6c3aa0ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -881,6 +881,7 @@ uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev,
}
#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H)
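+/* As with TCP_WATCH_STRIDE, the SQ stride is the register distance between consecutive watchpoints */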
+#define SQ_WATCH_STRIDE (mmSQ_WATCH1_ADDR_H - mmSQ_WATCH0_ADDR_H)
uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
uint64_t watch_address,
uint32_t watch_address_mask,
@@ -889,55 +890,93 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
uint32_t debug_vmid,
uint32_t inst)
{
+ /* SQ_WATCH?_ADDR_* and TCP_WATCH?_ADDR_* are programmed with the
+ * same values.
+ */
uint32_t watch_address_high;
uint32_t watch_address_low;
- uint32_t watch_address_cntl;
-
- watch_address_cntl = 0;
+ uint32_t tcp_watch_address_cntl;
+ uint32_t sq_watch_address_cntl;
watch_address_low = lower_32_bits(watch_address);
watch_address_high = upper_32_bits(watch_address) & 0xffff;
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = 0;
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
VMID,
debug_vmid);
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
MODE,
watch_mode);
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
MASK,
watch_address_mask >> 7);
+ sq_watch_address_cntl = 0;
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ VMID,
+ debug_vmid);
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 6);
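+ /* Note: the SQ MASK field takes a shift one bit smaller than the
+ * TCP one (>> 6 vs >> 7 above), presumably reflecting a different
+ * mask-field granularity between the two register sets.
+ */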
+
/* Turning off this watch point until we set all the registers */
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
VALID,
0);
-
WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
(watch_id * TCP_WATCH_STRIDE)),
- watch_address_cntl);
+ tcp_watch_address_cntl);
+
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ VALID,
+ 0);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ sq_watch_address_cntl);
+ /* Program {TCP,SQ}_WATCH?_ADDR* */
WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_high);
-
WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_low);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_ADDR_H) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ watch_address_high);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_ADDR_L) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ watch_address_low);
+
/* Enable the watch point */
- watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
TCP_WATCH0_CNTL,
VALID,
1);
-
WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
(watch_id * TCP_WATCH_STRIDE)),
- watch_address_cntl);
+ tcp_watch_address_cntl);
+
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ VALID,
+ 1);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ sq_watch_address_cntl);
return 0;
}
@@ -953,8 +992,14 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
(watch_id * TCP_WATCH_STRIDE)),
watch_address_cntl);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ watch_address_cntl);
+
return 0;
}
+#undef TCP_WATCH_STRIDE
+#undef SQ_WATCH_STRIDE
/* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df58a6a1a67e..e4d4e55c08ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -220,7 +220,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
- vram_size - reserved_for_pt)) {
+ vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) {
ret = -ENOMEM;
goto release;
}
@@ -1854,6 +1854,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
+ amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
@@ -2900,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
amdgpu_sync_create(&sync_obj);
- /* Validate BOs and map them to GPUVM (update VM page tables). */
+ /* Validate BOs managed by KFD */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
- struct kfd_mem_attachment *attachment;
struct dma_resv_iter cursor;
struct dma_fence *fence;
@@ -2931,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
goto validate_map_fail;
}
}
+ }
+
+ if (failed_size)
+ pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+
+ /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+ * validations above would invalidate DMABuf imports again.
+ */
+ ret = process_validate_vms(process_info, &exec.ticket);
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
+ goto validate_map_fail;
+ }
+
+ /* Update mappings managed by KFD. */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list) {
+ struct kfd_mem_attachment *attachment;
+
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
continue;
@@ -2947,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
}
- if (failed_size)
- pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
-
- /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
- * validations above would invalidate DMABuf imports again.
- */
- ret = process_validate_vms(process_info, &exec.ticket);
- if (ret) {
- pr_debug("Validating VMs failed, ret: %d\n", ret);
- goto validate_map_fail;
- }
-
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 6857c586ded7..a6d64bdbbb14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -34,6 +34,7 @@ union firmware_info {
struct atom_firmware_info_v3_2 v32;
struct atom_firmware_info_v3_3 v33;
struct atom_firmware_info_v3_4 v34;
+ struct atom_firmware_info_v3_5 v35;
};
/*
@@ -872,6 +873,10 @@ int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
fw_reserved_fb_size =
(firmware_info->v34.fw_reserved_size_in_kb << 10);
break;
+ case 5:
+ fw_reserved_fb_size =
+ (firmware_info->v35.fw_reserved_size_in_kb << 10);
+ break;
default:
fw_reserved_fb_size = 0;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index edc6377ec5ff..199693369c7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
- false, false, false);
+ false, false, 0);
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0a4b09709cfb..ec888fc6ead8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -819,7 +819,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f5d0fa207a88..b62ae3c91a9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2065,12 +2065,13 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
char reg_offset[11];
uint32_t *new = NULL, *tmp = NULL;
- int ret, i = 0, len = 0;
+ unsigned int len = 0;
+ int ret, i = 0;
do {
memset(reg_offset, 0, 11);
if (copy_from_user(reg_offset, buf + len,
- min(10, ((int)size-len)))) {
+ min(10, (size-len)))) {
ret = -EFAULT;
goto error_free;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
new file mode 100644
index 000000000000..c1cb62683695
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/devcoredump.h>
+#include "amdgpu_dev_coredump.h"
+#include "atom.h"
+
+#ifndef CONFIG_DEV_COREDUMP
+void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
+ struct amdgpu_reset_context *reset_context)
+{
+}
+#else
+
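+/* Printable names for each hardware IP, indexed by HWIP id */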
+const char *hw_ip_names[MAX_HWIP] = {
+ [GC_HWIP] = "GC",
+ [HDP_HWIP] = "HDP",
+ [SDMA0_HWIP] = "SDMA0",
+ [SDMA1_HWIP] = "SDMA1",
+ [SDMA2_HWIP] = "SDMA2",
+ [SDMA3_HWIP] = "SDMA3",
+ [SDMA4_HWIP] = "SDMA4",
+ [SDMA5_HWIP] = "SDMA5",
+ [SDMA6_HWIP] = "SDMA6",
+ [SDMA7_HWIP] = "SDMA7",
+ [LSDMA_HWIP] = "LSDMA",
+ [MMHUB_HWIP] = "MMHUB",
+ [ATHUB_HWIP] = "ATHUB",
+ [NBIO_HWIP] = "NBIO",
+ [MP0_HWIP] = "MP0",
+ [MP1_HWIP] = "MP1",
+ [UVD_HWIP] = "UVD/JPEG/VCN",
+ [VCN1_HWIP] = "VCN1",
+ [VCE_HWIP] = "VCE",
+ [VPE_HWIP] = "VPE",
+ [DF_HWIP] = "DF",
+ [DCE_HWIP] = "DCE",
+ [OSSSYS_HWIP] = "OSSSYS",
+ [SMUIO_HWIP] = "SMUIO",
+ [PWR_HWIP] = "PWR",
+ [NBIF_HWIP] = "NBIF",
+ [THM_HWIP] = "THM",
+ [CLK_HWIP] = "CLK",
+ [UMC_HWIP] = "UMC",
+ [RSMU_HWIP] = "RSMU",
+ [XGMI_HWIP] = "XGMI",
+ [DCI_HWIP] = "DCI",
+ [PCIE_HWIP] = "PCIE",
+};
+
+static void amdgpu_devcoredump_fw_info(struct amdgpu_device *adev,
+ struct drm_printer *p)
+{
+ uint32_t version;
+ uint32_t feature;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ drm_printf(p, "VCE feature version: %u, fw version: 0x%08x\n",
+ adev->vce.fb_version, adev->vce.fw_version);
+ drm_printf(p, "UVD feature version: %u, fw version: 0x%08x\n", 0,
+ adev->uvd.fw_version);
+ drm_printf(p, "GMC feature version: %u, fw version: 0x%08x\n", 0,
+ adev->gmc.fw_version);
+ drm_printf(p, "ME feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.me_feature_version, adev->gfx.me_fw_version);
+ drm_printf(p, "PFP feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.pfp_feature_version, adev->gfx.pfp_fw_version);
+ drm_printf(p, "CE feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.ce_feature_version, adev->gfx.ce_fw_version);
+ drm_printf(p, "RLC feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_feature_version, adev->gfx.rlc_fw_version);
+
+ drm_printf(p, "RLC SRLC feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_srlc_feature_version,
+ adev->gfx.rlc_srlc_fw_version);
+ drm_printf(p, "RLC SRLG feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_srlg_feature_version,
+ adev->gfx.rlc_srlg_fw_version);
+ drm_printf(p, "RLC SRLS feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_srls_feature_version,
+ adev->gfx.rlc_srls_fw_version);
+ drm_printf(p, "RLCP feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlcp_ucode_feature_version,
+ adev->gfx.rlcp_ucode_version);
+ drm_printf(p, "RLCV feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlcv_ucode_feature_version,
+ adev->gfx.rlcv_ucode_version);
+ drm_printf(p, "MEC feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.mec_feature_version, adev->gfx.mec_fw_version);
+
+ if (adev->gfx.mec2_fw)
+ drm_printf(p, "MEC2 feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.mec2_feature_version,
+ adev->gfx.mec2_fw_version);
+
+ drm_printf(p, "IMU feature version: %u, fw version: 0x%08x\n", 0,
+ adev->gfx.imu_fw_version);
+ drm_printf(p, "PSP SOS feature version: %u, fw version: 0x%08x\n",
+ adev->psp.sos.feature_version, adev->psp.sos.fw_version);
+ drm_printf(p, "PSP ASD feature version: %u, fw version: 0x%08x\n",
+ adev->psp.asd_context.bin_desc.feature_version,
+ adev->psp.asd_context.bin_desc.fw_version);
+
+ drm_printf(p, "TA XGMI feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.xgmi_context.context.bin_desc.feature_version,
+ adev->psp.xgmi_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA RAS feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.ras_context.context.bin_desc.feature_version,
+ adev->psp.ras_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA HDCP feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.hdcp_context.context.bin_desc.feature_version,
+ adev->psp.hdcp_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA DTM feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.dtm_context.context.bin_desc.feature_version,
+ adev->psp.dtm_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA RAP feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.rap_context.context.bin_desc.feature_version,
+ adev->psp.rap_context.context.bin_desc.fw_version);
+ drm_printf(p,
+ "TA SECURE DISPLAY feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.securedisplay_context.context.bin_desc.feature_version,
+ adev->psp.securedisplay_context.context.bin_desc.fw_version);
+
+ /* SMC firmware */
+ version = adev->pm.fw_version;
+
+ smu_program = (version >> 24) & 0xff;
+ smu_major = (version >> 16) & 0xff;
+ smu_minor = (version >> 8) & 0xff;
+ smu_debug = (version >> 0) & 0xff;
+ drm_printf(p,
+ "SMC feature version: %u, program: %d, fw version: 0x%08x (%d.%d.%d)\n",
+ 0, smu_program, version, smu_major, smu_minor, smu_debug);
+
+ /* SDMA firmware */
+ for (int i = 0; i < adev->sdma.num_instances; i++) {
+ drm_printf(p,
+ "SDMA%d feature version: %u, firmware version: 0x%08x\n",
+ i, adev->sdma.instance[i].feature_version,
+ adev->sdma.instance[i].fw_version);
+ }
+
+ drm_printf(p, "VCN feature version: %u, fw version: 0x%08x\n", 0,
+ adev->vcn.fw_version);
+ drm_printf(p, "DMCU feature version: %u, fw version: 0x%08x\n", 0,
+ adev->dm.dmcu_fw_version);
+ drm_printf(p, "DMCUB feature version: %u, fw version: 0x%08x\n", 0,
+ adev->dm.dmcub_fw_version);
+ drm_printf(p, "PSP TOC feature version: %u, fw version: 0x%08x\n",
+ adev->psp.toc.feature_version, adev->psp.toc.fw_version);
+
+ version = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK;
+ feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK) >>
+ AMDGPU_MES_FEAT_VERSION_SHIFT;
+ drm_printf(p, "MES_KIQ feature version: %u, fw version: 0x%08x\n",
+ feature, version);
+
+ version = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+ feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK) >>
+ AMDGPU_MES_FEAT_VERSION_SHIFT;
+ drm_printf(p, "MES feature version: %u, fw version: 0x%08x\n", feature,
+ version);
+
+ drm_printf(p, "VPE feature version: %u, fw version: 0x%08x\n",
+ adev->vpe.feature_version, adev->vpe.fw_version);
+
+ drm_printf(p, "\nVBIOS Information\n");
+ drm_printf(p, "vbios name : %s\n", ctx->name);
+ drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn);
+ drm_printf(p, "vbios version : %d\n", ctx->version);
+ drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str);
+ drm_printf(p, "vbios date : %s\n", ctx->date);
+}
+
+static ssize_t
+amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen)
+{
+ struct drm_printer p;
+ struct amdgpu_coredump_info *coredump = data;
+ struct drm_print_iterator iter;
+ struct amdgpu_vm_fault_info *fault_info;
+ int i, ver;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
+ drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
+ coredump->reset_time.tv_nsec);
+
+ if (coredump->reset_task_info.pid)
+ drm_printf(&p, "process_name: %s PID: %d\n",
+ coredump->reset_task_info.process_name,
+ coredump->reset_task_info.pid);
+
+ /* GPU IP's information of the SOC */
+ drm_printf(&p, "\nIP Information\n");
+ drm_printf(&p, "SOC Family: %d\n", coredump->adev->family);
+ drm_printf(&p, "SOC Revision id: %d\n", coredump->adev->rev_id);
+ drm_printf(&p, "SOC External Revision id: %d\n", coredump->adev->external_rev_id);
+
+ for (int i = 1; i < MAX_HWIP; i++) {
+ for (int j = 0; j < HWIP_MAX_INSTANCE; j++) {
+ ver = coredump->adev->ip_versions[i][j];
+ if (ver)
+ drm_printf(&p, "HWIP: %s[%d][%d]: v%d.%d.%d.%d.%d\n",
+ hw_ip_names[i], i, j,
+ IP_VERSION_MAJ(ver),
+ IP_VERSION_MIN(ver),
+ IP_VERSION_REV(ver),
+ IP_VERSION_VARIANT(ver),
+ IP_VERSION_SUBREV(ver));
+ }
+ }
+
+ /* IP firmware information */
+ drm_printf(&p, "\nIP Firmwares\n");
+ amdgpu_devcoredump_fw_info(coredump->adev, &p);
+
+ if (coredump->ring) {
+ drm_printf(&p, "\nRing timed out details\n");
+ drm_printf(&p, "IP Type: %d Ring Name: %s\n",
+ coredump->ring->funcs->type,
+ coredump->ring->name);
+ }
+
+ /* Add page fault information */
+ fault_info = &coredump->adev->vm_manager.fault_info;
+ drm_printf(&p, "\n[%s] Page fault observed\n",
+ fault_info->vmhub ? "mmhub" : "gfxhub");
+ drm_printf(&p, "Faulty page starting at address: 0x%016llx\n", fault_info->addr);
+ drm_printf(&p, "Protection fault status register: 0x%x\n\n", fault_info->status);
+
+ /* dump the ip state for each ip */
+ drm_printf(&p, "IP Dump\n");
+ for (int i = 0; i < coredump->adev->num_ip_blocks; i++) {
+ if (coredump->adev->ip_blocks[i].version->funcs->print_ip_state) {
+ drm_printf(&p, "IP: %s\n",
+ coredump->adev->ip_blocks[i]
+ .version->funcs->name);
+ coredump->adev->ip_blocks[i]
+ .version->funcs->print_ip_state(
+ (void *)coredump->adev, &p);
+ drm_printf(&p, "\n");
+ }
+ }
+
+ /* Add ring buffer information */
+ drm_printf(&p, "Ring buffer information\n");
+ for (int i = 0; i < coredump->adev->num_rings; i++) {
+ int j = 0;
+ struct amdgpu_ring *ring = coredump->adev->rings[i];
+
+ drm_printf(&p, "ring name: %s\n", ring->name);
+ drm_printf(&p, "Rptr: 0x%llx Wptr: 0x%llx RB mask: %x\n",
+ amdgpu_ring_get_rptr(ring),
+ amdgpu_ring_get_wptr(ring),
+ ring->buf_mask);
+ drm_printf(&p, "Ring size in dwords: %d\n",
+ ring->ring_size / 4);
+ drm_printf(&p, "Ring contents\n");
+ drm_printf(&p, "Offset \t Value\n");
+
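+ /* j advances in bytes while ring->ring[] is indexed in dwords, hence j / 4 */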
+ while (j < ring->ring_size) {
+ drm_printf(&p, "0x%x \t 0x%x\n", j, ring->ring[j / 4]);
+ j += 4;
+ }
+ }
+
+ if (coredump->reset_vram_lost)
+ drm_printf(&p, "VRAM is lost due to GPU reset!\n");
+ if (coredump->adev->reset_info.num_regs) {
+ drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
+
+ for (i = 0; i < coredump->adev->reset_info.num_regs; i++)
+ drm_printf(&p, "0x%08x: 0x%08x\n",
+ coredump->adev->reset_info.reset_dump_reg_list[i],
+ coredump->adev->reset_info.reset_dump_reg_value[i]);
+ }
+
+ return count - iter.remain;
+}
+
+static void amdgpu_devcoredump_free(void *data)
+{
+ kfree(data);
+}
+
+void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_coredump_info *coredump;
+ struct drm_device *dev = adev_to_drm(adev);
+ struct amdgpu_job *job = reset_context->job;
+ struct drm_sched_job *s_job;
+
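+ /* Called from the GPU reset path, so use GFP_NOWAIT rather than risk blocking on reclaim */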
+ coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
+
+ if (!coredump) {
+ DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
+ return;
+ }
+
+ coredump->reset_vram_lost = vram_lost;
+
+ if (reset_context->job && reset_context->job->vm) {
+ struct amdgpu_task_info *ti;
+ struct amdgpu_vm *vm = reset_context->job->vm;
+
+ ti = amdgpu_vm_get_task_info_vm(vm);
+ if (ti) {
+ coredump->reset_task_info = *ti;
+ amdgpu_vm_put_task_info(ti);
+ }
+ }
+
+ if (job) {
+ s_job = &job->base;
+ coredump->ring = to_amdgpu_ring(s_job->sched);
+ }
+
+ coredump->adev = adev;
+
+ ktime_get_ts64(&coredump->reset_time);
+
+ dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
+ amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
new file mode 100644
index 000000000000..52459512cb2b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DEV_COREDUMP_H__
+#define __AMDGPU_DEV_COREDUMP_H__
+
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+
+#ifdef CONFIG_DEV_COREDUMP
+
+#define AMDGPU_COREDUMP_VERSION "1"
+
+struct amdgpu_coredump_info {
+ struct amdgpu_device *adev;
+ struct amdgpu_task_info reset_task_info;
+ struct timespec64 reset_time;
+ bool reset_vram_lost;
+ struct amdgpu_ring *ring;
+};
+#endif
+
+void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
+ struct amdgpu_reset_context *reset_context);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5dc24c971b41..861ccff78af9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -74,6 +74,7 @@
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"
+#include "amdgpu_dev_coredump.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
@@ -143,6 +144,8 @@ const char *amdgpu_asic_name[] = {
"LAST",
};
+static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
+
/**
* DOC: pcie_replay_count
*
@@ -335,16 +338,93 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
*
* @dev: drm_device pointer
*
- * Returns true if the device supporte BACO,
- * otherwise return false.
+ * Return:
+ * 1 if the device supports BACO;
+ * 3 if the device supports MACO (which only works if BACO is supported);
+ * 0 otherwise.
*/
-bool amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_asic_supports_baco(adev);
}
+void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
+{
+ struct drm_device *dev;
+ int bamaco_support;
+
+ dev = adev_to_drm(adev);
+
+ adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
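+ /* bamaco_support is a bitmask: BACO_SUPPORT, plus MACO_SUPPORT when BAMACO is available */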
+ bamaco_support = amdgpu_device_supports_baco(dev);
+
+ switch (amdgpu_runtime_pm) {
+ case 2:
+ if (bamaco_support & MACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
+ dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
+ } else if (bamaco_support == BACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ dev_info(adev->dev, "Requested mode BAMACO not available,fallback to use BACO\n");
+ }
+ break;
+ case 1:
+ if (bamaco_support & BACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ dev_info(adev->dev, "Forcing BACO for runtime pm\n");
+ }
+ break;
+ case -1:
+ case -2:
+ if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+ adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
+ dev_info(adev->dev, "Using ATPX for runtime pm\n");
+ } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
+ dev_info(adev->dev, "Using BOCO for runtime pm\n");
+ } else {
+ if (!bamaco_support)
+ goto no_runtime_pm;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
/* BACO is not supported on vega20 and arcturus */
+ break;
+ case CHIP_VEGA10:
+ /* enable BACO as runpm mode if noretry=0 */
+ if (!adev->gmc.noretry)
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ break;
+ default:
+ /* enable BACO as runpm mode on CI+ */
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ break;
+ }
+
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
+ if (bamaco_support & MACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
+ dev_info(adev->dev, "Using BAMACO for runtime pm\n");
+ } else {
+ dev_info(adev->dev, "Using BACO for runtime pm\n");
+ }
+ }
+ }
+ break;
+ case 0:
+ dev_info(adev->dev, "runtime pm is manually disabled\n");
+ break;
+ default:
+ break;
+ }
+
+no_runtime_pm:
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
+ dev_info(adev->dev, "Runtime PM not available\n");
+}
/**
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
@@ -1402,13 +1482,17 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
*/
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
- unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
+ unsigned long flags, offset;
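+ /* Serialize the bitmap search and set so concurrent callers cannot
+ * claim the same writeback slot.
+ */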
+ spin_lock_irqsave(&adev->wb.lock, flags);
+ offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
+ spin_unlock_irqrestore(&adev->wb.lock, flags);
*wb = offset << 3; /* convert to dw offset */
return 0;
} else {
+ spin_unlock_irqrestore(&adev->wb.lock, flags);
return -EINVAL;
}
}
@@ -1423,9 +1507,13 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
*/
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
+ unsigned long flags;
+
wb >>= 3;
+ spin_lock_irqsave(&adev->wb.lock, flags);
if (wb < adev->wb.num_wb)
__clear_bit(wb, adev->wb.used);
+ spin_unlock_irqrestore(&adev->wb.lock, flags);
}
/**
@@ -1455,7 +1543,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
- DRM_WARN("System can't access extended configuration space,please check!!\n");
+ DRM_WARN("System can't access extended configuration space, please check!!\n");
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
@@ -3981,6 +4069,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
spin_lock_init(&adev->mm_stats.lock);
+ spin_lock_init(&adev->wb.lock);
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
@@ -4069,6 +4158,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
+ if (amdgpu_sriov_vf(adev) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+ /* VF MMIO access (except mailbox range) from CPU
+ * will be blocked during sriov runtime
+ */
+ adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
+
amdgpu_gmc_noretry_set(adev);
/* Need to get xgmi info early to decide the reset behavior*/
if (adev->gmc.xgmi.supported) {
@@ -4135,18 +4231,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ip_blocks[i].status.hw = true;
}
}
+ } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(adev)) {
+ r = psp_gpu_reset(adev);
} else {
- tmp = amdgpu_reset_method;
- /* It should do a default reset when loading or reloading the driver,
- * regardless of the module parameter reset_method.
- */
- amdgpu_reset_method = AMD_RESET_METHOD_NONE;
- r = amdgpu_asic_reset(adev);
- amdgpu_reset_method = tmp;
- if (r) {
- dev_err(adev->dev, "asic reset on init failed\n");
- goto failed;
- }
+ tmp = amdgpu_reset_method;
+ /* It should do a default reset when loading or reloading the driver,
+ * regardless of the module parameter reset_method.
+ */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+ r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
+ }
+
+ if (r) {
+ dev_err(adev->dev, "asic reset on init failed\n");
+ goto failed;
}
}
@@ -4539,6 +4639,8 @@ int amdgpu_device_prepare(struct drm_device *dev)
if (r)
goto unprepare;
+ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -4968,12 +5070,15 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
retry:
amdgpu_amdkfd_pre_reset(adev);
+ amdgpu_device_stop_pending_resets(adev);
+
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
r = amdgpu_virt_reset_gpu(adev);
if (r)
return r;
+ amdgpu_ras_set_fed(adev, false);
amdgpu_irq_gpu_reset_resume_helper(adev);
/* some sw clean up VF needs to do before recover */
@@ -5257,11 +5362,21 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
+ uint32_t i;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
- amdgpu_reset_reg_dumps(tmp_adev);
+
+ if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
+ amdgpu_reset_reg_dumps(tmp_adev);
+
+ /* Trigger ip dump before we reset the asic */
+ for (i = 0; i < tmp_adev->num_ip_blocks; i++)
+ if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
+ tmp_adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)tmp_adev);
+ }
reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
@@ -5334,7 +5449,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
- amdgpu_coredump(tmp_adev, vram_lost, reset_context);
+ if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
+ amdgpu_coredump(tmp_adev, vram_lost, reset_context);
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU reset!\n");
@@ -5532,6 +5648,23 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
}
+static int amdgpu_device_health_check(struct list_head *device_list_handle)
+{
+ struct amdgpu_device *tmp_adev;
+ int ret = 0;
+ u32 status;
+
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
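+ /* An all-ones config read means the device has dropped off the bus */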
+ if (PCI_POSSIBLE_ERROR(status)) {
+ dev_err(tmp_adev->dev, "device lost from bus!");
+ ret = -ENODEV;
+ }
+ }
+
+ return ret;
+}
+
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -5603,6 +5736,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(device_list_handle);
+ if (r)
+ goto end_reset;
+ }
+
/* We need to lock reset domain only once both for XGMI and single device */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
@@ -5685,11 +5824,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
tmp_adev->asic_reset_res = r;
}
- /*
- * Drop all pending non scheduler resets. Scheduler resets
- * were already dropped during drm_sched_stop
- */
- amdgpu_device_stop_pending_resets(tmp_adev);
+ if (!amdgpu_sriov_vf(tmp_adev))
+ /*
+ * Drop all pending non scheduler resets. Scheduler resets
+ * were already dropped during drm_sched_stop
+ */
+ amdgpu_device_stop_pending_resets(tmp_adev);
}
/* Actual ASIC resets if needed.*/
@@ -5768,6 +5908,7 @@ skip_sched_resume:
reset_list);
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
amdgpu_put_xgmi_hive(hive);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index a07e4b87d4ca..0e31bdb4b7cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -97,6 +97,7 @@
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
+#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
#include "jpeg_v5_0_0.h"
@@ -245,6 +246,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
return -ENOENT;
}
+#define IP_DISCOVERY_V2 2
+#define IP_DISCOVERY_V4 4
+
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
@@ -259,14 +263,14 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* wait for this to complete. Once the C2PMSG is updated, we can
* continue.
*/
- if (dev_is_removable(&adev->pdev->dev)) {
- for (i = 0; i < 1000; i++) {
- msg = RREG32(mmMP0_SMN_C2PMSG_33);
- if (msg & 0x80000000)
- break;
- msleep(1);
- }
+
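+ /* Poll the completion flag in bit 31 of C2PMSG_33 for up to ~1s */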
+ for (i = 0; i < 1000; i++) {
+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ if (msg & 0x80000000)
+ break;
+ usleep_range(1000, 1100);
}
+
vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
if (vram_size) {
@@ -1896,6 +1900,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:
@@ -2237,6 +2244,7 @@ static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
if (amdgpu_umsch_mm & 0x1) {
amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
adev->enable_umsch_mm = true;
@@ -2676,6 +2684,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 1):
adev->smuio.funcs = &smuio_v13_0_6_funcs;
break;
+ case IP_VERSION(14, 0, 2):
+ adev->smuio.funcs = &smuio_v14_0_2_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 80b9642f2bc4..ea14f1c8f430 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
+int amdgpu_mes_log_enable = 0;
int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
@@ -668,6 +669,15 @@ MODULE_PARM_DESC(mes,
module_param_named(mes, amdgpu_mes, int, 0444);
/**
+ * DOC: mes_log_enable (int)
+ * Enable the Micro Engine Scheduler (MES) internal log.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(mes_log_enable,
+ "Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
+module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
+
+/**
* DOC: mes_kiq (int)
* Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
* (0 = disabled (default), 1 = enabled)
@@ -915,7 +925,7 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
-module_param_named(reset_method, amdgpu_reset_method, int, 0444);
+module_param_named(reset_method, amdgpu_reset_method, int, 0644);
/**
* DOC: bad_page_threshold (int) Bad page threshold specifies the
@@ -2471,6 +2481,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
/* Use a common context, just need to make sure full reset is done */
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
r = amdgpu_do_asic_reset(&device_list, &reset_context);
if (r) {
@@ -2734,7 +2745,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
} else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
/* nothing to do */
- } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
+ } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
amdgpu_device_baco_enter(drm_dev);
}
@@ -2774,7 +2786,8 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
* PCI core handles it for _PR3.
*/
pci_set_master(pdev);
- } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
+ } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 55d5508987ff..1d955652f3ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1206,7 +1206,8 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
break;
default:
- break;
+ dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
+ return;
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 8fcf889ddce9..64f197bbc866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -259,7 +259,6 @@ struct amdgpu_cu_info {
struct amdgpu_gfx_ras {
struct amdgpu_ras_block_object ras_block;
void (*enable_watchdog_timer)(struct amdgpu_device *adev);
- bool (*query_utcl2_poison_status)(struct amdgpu_device *adev);
int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
@@ -434,6 +433,10 @@ struct amdgpu_gfx {
uint32_t num_xcc_per_xcp;
struct mutex partition_mutex;
bool mcbp; /* mid command buffer preemption */
+
+ /* IP reg dump */
+ uint32_t *ip_dump;
+ uint32_t reg_count;
};
struct amdgpu_gfx_ras_reg_entry {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
index c7b44aeb671b..103a837ccc71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -38,6 +38,8 @@ struct amdgpu_gfxhub_funcs {
void (*mode2_save_regs)(struct amdgpu_device *adev);
void (*mode2_restore_regs)(struct amdgpu_device *adev);
void (*halt)(struct amdgpu_device *adev);
+ bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
+ int xcc_id);
};
struct amdgpu_gfxhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index d79cb13e1aa8..00d6211e0fbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -279,7 +279,7 @@ amdgpu_i2c_lookup(struct amdgpu_device *adev,
return NULL;
}
-static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
+static int amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 *val)
@@ -304,16 +304,18 @@ static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
- *val = in_buf[0];
- DRM_DEBUG("val = 0x%02x\n", *val);
- } else {
- DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
- addr, *val);
+ if (i2c_transfer(&i2c_bus->adapter, msgs, 2) != 2) {
+ DRM_DEBUG("i2c 0x%02x read failed\n", addr);
+ return -EIO;
}
+
+ *val = in_buf[0];
+ DRM_DEBUG("val = 0x%02x\n", *val);
+
+ return 0;
}
-static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
+static int amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 val)
@@ -329,9 +331,12 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
out_buf[0] = addr;
out_buf[1] = val;
- if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
- DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
- addr, val);
+ if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) {
+ DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", addr, val);
+ return -EIO;
+ }
+
+ return 0;
}
/* ddc router switching */
@@ -346,16 +351,18 @@ amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connecto
if (!amdgpu_connector->router_bus)
return;
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x3, &val);
+ 0x3, &val))
+ return;
val &= ~amdgpu_connector->router.ddc_mux_control_pin;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, val);
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x1, &val);
+ 0x1, &val))
+ return;
val &= ~amdgpu_connector->router.ddc_mux_control_pin;
val |= amdgpu_connector->router.ddc_mux_state;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
@@ -375,16 +382,18 @@ amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector
if (!amdgpu_connector->router_bus)
return;
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x3, &val);
+ 0x3, &val))
+ return;
val &= ~amdgpu_connector->router.cd_mux_control_pin;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, val);
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x1, &val);
+ 0x1, &val))
+ return;
val &= ~amdgpu_connector->router.cd_mux_control_pin;
val |= amdgpu_connector->router.cd_mux_state;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
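With amdgpu_i2c_get_byte() now returning an errno, the router helpers abort their read-modify-write sequence on a failed read instead of operating on an uninitialized byte. The resulting pattern, sketched with hypothetical reg/mask/state names:

	u8 val;

	if (amdgpu_i2c_get_byte(bus, slave_addr, reg, &val))
		return;		/* val is not valid, bail out */

	val &= ~mask;		/* clear the mux control bits */
	val |= state;		/* program the desired routing */
	amdgpu_i2c_put_byte(bus, slave_addr, reg, val);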
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 7e6d09730e6d..665c63f55278 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -445,6 +445,14 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
entry.ih = ih;
entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+
+ /*
+ * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
+ * si and tonga), so initialize timestamp and timestamp_src to 0
+ */
+ entry.timestamp = 0;
+ entry.timestamp_src = 0;
+
amdgpu_ih_decode_iv(adev, &entry);
trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4b3000c21ef2..e4742b65032d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
- DRM_INFO("Skip scheduling IBs!\n");
+ dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
+ ring->name);
} else {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev,
+ "Error scheduling IBs (%d) in ring(%s)", r,
+ ring->name);
}
job->job_run_counter++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a2df3025a754..a0ea6fe8d060 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -149,38 +149,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
goto out;
}
- adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
- if (amdgpu_device_supports_px(dev) &&
- (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */
- adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
- dev_info(adev->dev, "Using ATPX for runtime pm\n");
- } else if (amdgpu_device_supports_boco(dev) &&
- (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */
- adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
- dev_info(adev->dev, "Using BOCO for runtime pm\n");
- } else if (amdgpu_device_supports_baco(dev) &&
- (amdgpu_runtime_pm != 0)) {
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- /* enable BACO as runpm mode if runpm=1 */
- if (amdgpu_runtime_pm > 0)
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
- break;
- case CHIP_VEGA10:
- /* enable BACO as runpm mode if noretry=0 */
- if (!adev->gmc.noretry)
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
- break;
- default:
- /* enable BACO as runpm mode on CI+ */
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
- break;
- }
-
- if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)
- dev_info(adev->dev, "Using BACO for runtime pm\n");
- }
+ amdgpu_device_detect_runtime_pm_mode(adev);
/* Call ACPI methods: require modeset init
* but failure is not fatal
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 24ad4b97177b..0734490347db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -210,22 +210,26 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
return -EOPNOTSUPP;
}
-static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry)
+static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry,
+ struct ras_query_context *qctx)
{
- dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
- dev_info(adev->dev, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_STATUS]);
- dev_info(adev->dev, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_ADDR]);
- dev_info(adev->dev, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_MISC0]);
- dev_info(adev->dev, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_IPID]);
- dev_info(adev->dev, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
- idx, entry->regs[MCA_REG_IDX_SYND]);
+ u64 event_id = qctx->event_id;
+
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_STATUS]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_ADDR]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_MISC0]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_IPID]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_SYND]);
}
-int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
+int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct ras_err_data *err_data, struct ras_query_context *qctx)
{
struct amdgpu_smuio_mcm_config_info mcm_info;
struct ras_err_addr err_addr = {0};
@@ -244,7 +248,7 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
list_for_each_entry(node, &mca_set.list, node) {
entry = &node->entry;
- amdgpu_mca_smu_mca_bank_dump(adev, i++, entry);
+ amdgpu_mca_smu_mca_bank_dump(adev, i++, entry, qctx);
count = 0;
ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index b964110ed1e0..e5bf07ce3451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -169,6 +169,7 @@ void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root
void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set);
int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry);
void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set);
-int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data);
+int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct ras_err_data *err_data, struct ras_query_context *qctx);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index a98e03e0a51f..5ca5c47ab54e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -32,6 +32,18 @@
#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8
+signed long amdgpu_mes_fence_wait_polling(u64 *fence,
+ u64 wait_seq,
+ signed long timeout)
+{
+
+ while ((s64)(wait_seq - *fence) > 0 && timeout > 0) {
+ udelay(2);
+ timeout -= 2;
+ }
+ return timeout > 0 ? timeout : 0;
+}
+
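amdgpu_mes_fence_wait_polling() busy-waits in udelay(2) steps, so the timeout is effectively a microsecond budget and the return value is whatever budget is left (0 on timeout). A usage sketch with a placeholder fence address and sequence number:

	/* wait up to ~1 ms for the MES firmware to signal seq */
	signed long left = amdgpu_mes_fence_wait_polling(fence_cpu_addr,
							 seq, 1000);
	if (!left)
		dev_err(adev->dev, "MES fence %llu timed out\n", seq);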
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
return roundup(AMDGPU_ONE_DOORBELL_SIZE *
@@ -40,7 +52,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
}
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
- struct amdgpu_mes_process *process,
int ip_type, uint64_t *doorbell_index)
{
unsigned int offset, found;
@@ -65,7 +76,6 @@ static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
}
static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
- struct amdgpu_mes_process *process,
uint32_t doorbell_index)
{
unsigned int old, rel_index;
@@ -102,7 +112,10 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ if (!amdgpu_mes_log_enable)
+ return 0;
+
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
@@ -653,7 +666,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
*queue_id = queue->queue_id = r;
/* allocate a doorbell index for the queue */
- r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
+ r = amdgpu_mes_kernel_doorbell_get(adev,
qprops->queue_type,
&qprops->doorbell_off);
if (r)
@@ -711,8 +724,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
return 0;
clean_up_doorbell:
- amdgpu_mes_kernel_doorbell_free(adev, gang->process,
- qprops->doorbell_off);
+ amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
@@ -766,8 +778,7 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
queue_id);
list_del(&queue->list);
- amdgpu_mes_kernel_doorbell_free(adev, gang->process,
- queue->doorbell_off);
+ amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
amdgpu_mes_unlock(&adev->mes);
amdgpu_mes_queue_free_mqd(queue);
@@ -775,6 +786,28 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
return 0;
}
+int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ struct mes_map_legacy_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0, sizeof(queue_input));
+
+ queue_input.queue_type = ring->funcs->type;
+ queue_input.doorbell_offset = ring->doorbell_index;
+ queue_input.pipe_id = ring->pipe;
+ queue_input.queue_id = ring->queue;
+ queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ queue_input.wptr_addr = ring->wptr_gpu_addr;
+
+ r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to map legacy queue\n");
+
+ return r;
+}
+
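amdgpu_mes_map_legacy_queue() packs the ring's MQD address, doorbell and pipe/queue ids into mes_map_legacy_queue_input and dispatches through the new funcs->map_legacy_queue hook; note it assumes the MES backend implements the hook. A hypothetical caller handing a kernel ring over to MES:

	if (adev->enable_mes) {
		r = amdgpu_mes_map_legacy_queue(adev, ring);
		if (r)
			return r;
	}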
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
@@ -1129,6 +1162,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
return;
amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
amdgpu_ring_fini(ring);
kfree(ring);
}
@@ -1471,7 +1505,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
const struct mes_firmware_header_v1_0 *mes_hdr;
struct amdgpu_firmware_info *info;
char ucode_prefix[30];
- char fw_name[40];
+ char fw_name[50];
bool need_retry = false;
int r;
@@ -1549,12 +1583,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
- mem, PAGE_SIZE, false);
+ mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
#endif
@@ -1565,7 +1598,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
- if (adev->enable_mes)
+ if (adev->enable_mes && amdgpu_mes_log_enable)
debugfs_create_file("amdgpu_mes_event_log", 0444, root,
adev, &amdgpu_debugfs_mes_event_log_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7d4f93fea937..df9f0404d842 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level {
#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
+#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximum log buffer size for MES */
struct amdgpu_mes_funcs;
@@ -140,6 +141,12 @@ struct amdgpu_mes {
/* ip specific functions */
const struct amdgpu_mes_funcs *funcs;
+
+ /* mes resource_1 bo */
+ struct amdgpu_bo *resource_1;
+ uint64_t resource_1_gpu_addr;
+ void *resource_1_addr;
+
};
struct amdgpu_mes_process {
@@ -241,6 +248,15 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
+struct mes_map_legacy_queue_input {
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint64_t mqd_addr;
+ uint64_t wptr_addr;
+};
+
struct mes_unmap_legacy_queue_input {
enum amdgpu_unmap_queues_action action;
uint32_t queue_type;
@@ -317,6 +333,9 @@ struct amdgpu_mes_funcs {
int (*remove_hw_queue)(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input);
+ int (*map_legacy_queue)(struct amdgpu_mes *mes,
+ struct mes_map_legacy_queue_input *input);
+
int (*unmap_legacy_queue)(struct amdgpu_mes *mes,
struct mes_unmap_legacy_queue_input *input);
@@ -333,6 +352,10 @@ struct amdgpu_mes_funcs {
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
+signed long amdgpu_mes_fence_wait_polling(u64 *fence,
+ u64 wait_seq,
+ signed long timeout);
+
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
@@ -356,6 +379,8 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
int *queue_id);
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 1ca9d4ed8063..95d676ee207f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -63,6 +63,8 @@ struct amdgpu_mmhub_funcs {
uint64_t page_table_base);
void (*update_power_gating)(struct amdgpu_device *adev,
bool enable);
+ bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
+ int hub_inst);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 010b0cb7693c..b2a83c802bbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -39,6 +39,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_vram_mgr.h"
/**
* DOC: amdgpu_object
@@ -153,8 +154,10 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
- if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ if (abo->tbo.type == ttm_bo_type_kernel &&
+ flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
+
c++;
}
@@ -173,6 +176,12 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
AMDGPU_PL_PREEMPT : TTM_PL_TT;
places[c].flags = 0;
+ /*
+ * When GTT is just an alternative to VRAM, make sure that we
+ * only use it as a fallback and still try to fill up VRAM first.
+ */
+ if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+ places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
@@ -595,8 +604,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- if (adev->ras_enabled)
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
@@ -605,6 +613,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
else
amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
+ bo->tbo.priority = 2;
+ else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
bo->tbo.priority = 1;
if (!bp->destroy)
@@ -617,8 +627,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.resource->mem_type == TTM_PL_VRAM &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -628,7 +637,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bo->tbo.resource->mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
+ r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
@@ -758,7 +767,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
amdgpu_bo_size(shadow), NULL, fence,
- true, false, false);
+ true, false, 0);
}
/**
@@ -960,6 +969,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (!bo->placements[i].lpfn ||
(lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
+ bo->placements[i].mem_type == TTM_PL_VRAM)
+ bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -1272,23 +1285,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
unsigned int domain;
bool shared;
/* Abort if the BO doesn't currently have a backing store */
- if (!bo->tbo.resource)
+ if (!res)
return;
obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);
- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+ domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;
@@ -1359,8 +1374,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
return;
- r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
+ r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
if (!WARN_ON(r)) {
+ amdgpu_vram_mgr_set_cleared(bo->resource);
amdgpu_bo_fence(abo, fence, false);
dma_fence_put(fence);
}
@@ -1389,10 +1405,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->resource->mem_type != TTM_PL_VRAM)
- return 0;
-
- if (amdgpu_bo_in_cpu_visible_vram(abo))
+ if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1428,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
- !amdgpu_bo_in_cpu_visible_vram(abo))
+ !amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1592,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
@@ -1587,10 +1601,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
+
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index be679c42b0b8..fa03d9e4874c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -251,28 +251,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
}
/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_res_cursor cursor;
-
- if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
- return false;
-
- amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
- while (cursor.remaining) {
- if (cursor.start < adev->gmc.visible_vram_size)
- return true;
-
- amdgpu_res_next(&cursor, cursor.size);
- }
-
- return false;
-}
-
-/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 94b310fdb719..4bd4602d11b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1053,6 +1053,11 @@ static int psp_asd_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
return 0;
+ /* bypass asd if display hardware is not available */
+ if (!amdgpu_device_has_display_hardware(psp->adev) &&
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
+ return 0;
+
psp->asd_context.mem_context.shared_mc_addr = 0;
psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
@@ -2260,6 +2265,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
+ if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
+ (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
+ ret = psp_bootloader_load_ipkeymgr_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
+ return ret;
+ }
+ }
+
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@@ -2617,7 +2631,8 @@ static int psp_load_p2s_table(struct psp_context *psp)
struct amdgpu_firmware_info *ucode =
&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
- if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
+ if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
return 0;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
@@ -2647,7 +2662,8 @@ static int psp_load_smu_fw(struct psp_context *psp)
* Skip SMU FW reloading in case of using BACO for runpm only,
* as SMU is always alive.
*/
- if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
+ if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
return 0;
if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
@@ -3273,6 +3289,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ras_drv.start_addr = ucode_start_addr;
break;
+ case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
+ psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ipkeymgr_drv.start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index ee16f134ae92..3635303e6548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -73,8 +73,10 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
PSP_BL__LOAD_SOCDRV = 0xB0000,
PSP_BL__LOAD_DBGDRV = 0xC0000,
+ PSP_BL__LOAD_HADDRV = PSP_BL__LOAD_DBGDRV,
PSP_BL__LOAD_INTFDRV = 0xD0000,
- PSP_BL__LOAD_RASDRV = 0xE0000,
+ PSP_BL__LOAD_RASDRV = 0xE0000,
+ PSP_BL__LOAD_IPKEYMGRDRV = 0xF0000,
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
@@ -117,6 +119,7 @@ struct psp_funcs {
int (*bootloader_load_intf_drv)(struct psp_context *psp);
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
int (*bootloader_load_ras_drv)(struct psp_context *psp);
+ int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -336,6 +339,7 @@ struct psp_context {
struct psp_bin_desc intf_drv;
struct psp_bin_desc dbg_drv;
struct psp_bin_desc ras_drv;
+ struct psp_bin_desc ipkeymgr_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -424,6 +428,9 @@ struct amdgpu_psp_funcs {
#define psp_bootloader_load_ras_drv(psp) \
((psp)->funcs->bootloader_load_ras_drv ? \
(psp)->funcs->bootloader_load_ras_drv((psp)) : 0)
+#define psp_bootloader_load_ipkeymgr_drv(psp) \
+ ((psp)->funcs->bootloader_load_ipkeymgr_drv ? \
+ (psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 8ebab6f22e5a..1adc81a55734 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -122,6 +122,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
#define MAX_UMC_POISON_POLLING_TIME_ASYNC 100 //ms
+#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -1045,6 +1047,7 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d
static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
struct ras_manager *ras_mgr,
struct ras_err_data *err_data,
+ struct ras_query_context *qctx,
const char *blk_name,
bool is_ue,
bool is_de)
@@ -1052,27 +1055,28 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
struct amdgpu_smuio_mcm_config_info *mcm_info;
struct ras_err_node *err_node;
struct ras_err_info *err_info;
+ u64 event_id = qctx->event_id;
if (is_ue) {
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
if (err_info->ue_count) {
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new uncorrectable hardware errors detected in %s block\n",
- mcm_info->socket_id,
- mcm_info->die_id,
- err_info->ue_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld new uncorrectable hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->ue_count,
+ blk_name);
}
}
for_each_ras_error(err_node, &ras_mgr->err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld uncorrectable hardware errors detected in total in %s block\n",
- mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld uncorrectable hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
}
} else {
@@ -1081,44 +1085,44 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
if (err_info->de_count) {
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new deferred hardware errors detected in %s block\n",
- mcm_info->socket_id,
- mcm_info->die_id,
- err_info->de_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld new deferred hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->de_count,
+ blk_name);
}
}
for_each_ras_error(err_node, &ras_mgr->err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld deferred hardware errors detected in total in %s block\n",
- mcm_info->socket_id, mcm_info->die_id,
- err_info->de_count, blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld deferred hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->de_count, blk_name);
}
} else {
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
if (err_info->ce_count) {
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new correctable hardware errors detected in %s block\n",
- mcm_info->socket_id,
- mcm_info->die_id,
- err_info->ce_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld new correctable hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->ce_count,
+ blk_name);
}
}
for_each_ras_error(err_node, &ras_mgr->err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld correctable hardware errors detected in total in %s block\n",
- mcm_info->socket_id, mcm_info->die_id,
- err_info->ce_count, blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld correctable hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->ce_count, blk_name);
}
}
}
@@ -1131,77 +1135,79 @@ static inline bool err_data_has_source_info(struct ras_err_data *data)
static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
struct ras_query_if *query_if,
- struct ras_err_data *err_data)
+ struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
{
struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
const char *blk_name = get_ras_block_str(&query_if->head);
+ u64 event_id = qctx->event_id;
if (err_data->ce_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
blk_name, false, false);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
- dev_info(adev->dev, "socket: %d, die: %d "
- "%ld correctable hardware errors "
- "detected in %s block\n",
- adev->smuio.funcs->get_socket_id(adev),
- adev->smuio.funcs->get_die_id(adev),
- ras_mgr->err_data.ce_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
+ "%ld correctable hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.ce_count,
+ blk_name);
} else {
- dev_info(adev->dev, "%ld correctable hardware errors "
- "detected in %s block\n",
- ras_mgr->err_data.ce_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.ce_count,
+ blk_name);
}
}
if (err_data->ue_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
blk_name, true, false);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
- dev_info(adev->dev, "socket: %d, die: %d "
- "%ld uncorrectable hardware errors "
- "detected in %s block\n",
- adev->smuio.funcs->get_socket_id(adev),
- adev->smuio.funcs->get_die_id(adev),
- ras_mgr->err_data.ue_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
+ "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.ue_count,
+ blk_name);
} else {
- dev_info(adev->dev, "%ld uncorrectable hardware errors "
- "detected in %s block\n",
- ras_mgr->err_data.ue_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.ue_count,
+ blk_name);
}
}
if (err_data->de_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
blk_name, false, true);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
- dev_info(adev->dev, "socket: %d, die: %d "
- "%ld deferred hardware errors "
- "detected in %s block\n",
- adev->smuio.funcs->get_socket_id(adev),
- adev->smuio.funcs->get_die_id(adev),
- ras_mgr->err_data.de_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
+ "%ld deferred hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.de_count,
+ blk_name);
} else {
- dev_info(adev->dev, "%ld deferred hardware errors "
- "detected in %s block\n",
- ras_mgr->err_data.de_count,
- blk_name);
+ RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.de_count,
+ blk_name);
}
}
}
@@ -1244,6 +1250,10 @@ int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
{
struct ras_manager *obj;
+ /* in resume phase, no need to create aca fs node */
+ if (adev->in_suspend || amdgpu_in_reset(adev))
+ return 0;
+
obj = get_ras_manager(adev, blk);
if (!obj)
return -EINVAL;
@@ -1265,7 +1275,8 @@ int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
}
static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
- enum aca_error_type type, struct ras_err_data *err_data)
+ enum aca_error_type type, struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
{
struct ras_manager *obj;
@@ -1273,7 +1284,7 @@ static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu
if (!obj)
return -EINVAL;
- return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data);
+ return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
}
ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
@@ -1287,13 +1298,14 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *a
if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
- return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
- "ce", info.ce_count);
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count, "de", info.ue_count);
}
static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
struct ras_query_if *info,
struct ras_err_data *err_data,
+ struct ras_query_context *qctx,
unsigned int error_query_mode)
{
enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
@@ -1329,17 +1341,21 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
}
} else {
if (amdgpu_aca_is_enabled(adev)) {
- ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data);
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
if (ret)
return ret;
- ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data);
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
if (ret)
return ret;
} else {
/* FIXME: add code to check return value later */
- amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
- amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
}
}
@@ -1351,6 +1367,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data;
+ struct ras_query_context qctx;
unsigned int error_query_mode;
int ret;
@@ -1364,8 +1381,12 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
return -EINVAL;
+ memset(&qctx, 0, sizeof(qctx));
+ qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
+ RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);
ret = amdgpu_ras_query_error_status_helper(adev, info,
&err_data,
+ &qctx,
error_query_mode);
if (ret)
goto out_fini_err_data;
@@ -1376,7 +1397,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
info->ce_count = obj->err_data.ce_count;
info->de_count = obj->err_data.de_count;
- amdgpu_ras_error_generate_report(adev, info, &err_data);
+ amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
out_fini_err_data:
amdgpu_ras_error_data_fini(&err_data);
@@ -2041,7 +2062,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
}
}
- amdgpu_umc_poison_handler(adev, obj->head.block, false);
+ amdgpu_umc_poison_handler(adev, obj->head.block, 0);
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
@@ -2061,6 +2082,17 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj
{
dev_info(obj->adev->dev,
"Poison is created\n");
+
+ if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
+
+ amdgpu_ras_put_poison_req(obj->adev,
+ AMDGPU_RAS_BLOCK__UMC, 0, NULL, NULL, false);
+
+ atomic_inc(&con->page_retirement_req_cnt);
+
+ wake_up(&con->page_retirement_wq);
+ }
}
static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
@@ -2371,7 +2403,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
- data->bps[i].retired_page);
+ data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
else if (status == -ENOENT)
@@ -2384,6 +2416,19 @@ out:
return ret;
}
+static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive, bool status)
+{
+ struct amdgpu_device *tmp_adev;
+
+ if (hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ amdgpu_ras_set_fed(tmp_adev, status);
+ } else {
+ amdgpu_ras_set_fed(adev, status);
+ }
+}
+
static void amdgpu_ras_do_recovery(struct work_struct *work)
{
struct amdgpu_ras *ras =
@@ -2393,8 +2438,21 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
- if (hive)
+ if (hive) {
atomic_set(&hive->ras_recovery, 1);
+
+ /* If any device in the hive received a RAS fatal error
+ * interrupt, set the fatal error status on all devices. This
+ * condition requires a recovery, and the flag is cleared as
+ * part of that recovery.
+ */
+ list_for_each_entry(remote_adev, &hive->device_list,
+ gmc.xgmi.head)
+ if (amdgpu_ras_get_fed_status(remote_adev)) {
+ amdgpu_ras_set_fed_all(adev, hive, true);
+ break;
+ }
+ }
if (!ras->disable_ras_err_cnt_harvest) {
/* Build list of devices to query RAS related errors */
@@ -2439,18 +2497,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- /* For any RAS error that needs a full reset to
- * recover, set the fatal error status
- */
- if (hive) {
- list_for_each_entry(remote_adev,
- &hive->device_list,
- gmc.xgmi.head)
- amdgpu_ras_set_fed(remote_adev,
- true);
- } else {
- amdgpu_ras_set_fed(adev, true);
- }
psp_fatal_error_recovery_quirk(&adev->psp);
}
}
@@ -2516,9 +2562,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
goto out;
}
- amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
- AMDGPU_GPU_PAGE_SIZE);
+ amdgpu_ras_reserve_page(adev, bps[i].retired_page);
memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
data->count++;
@@ -2674,10 +2718,167 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
}
}
+int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset)
+{
+ int ret = 0;
+ struct ras_poison_msg poison_msg;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ memset(&poison_msg, 0, sizeof(poison_msg));
+ poison_msg.block = block;
+ poison_msg.pasid = pasid;
+ poison_msg.reset = reset;
+ poison_msg.pasid_fn = pasid_fn;
+ poison_msg.data = data;
+
+ ret = kfifo_put(&con->poison_fifo, poison_msg);
+ if (!ret) {
+ dev_err(adev->dev, "Poison message fifo is full!\n");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
+ struct ras_poison_msg *poison_msg)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return kfifo_get(&con->poison_fifo, poison_msg);
+}
+
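The queue between interrupt context and the retirement thread is a fixed 128-entry kfifo of struct ras_poison_msg (declared in amdgpu_ras.h below), exchanged by value. A self-contained sketch of the same kfifo pattern, with process() standing in for the real consumer:

	#include <linux/kfifo.h>

	static DEFINE_KFIFO(msg_fifo, struct ras_poison_msg, 128);

	/* producer: kfifo_put() copies the element, returns 0 when full */
	if (!kfifo_put(&msg_fifo, msg))
		pr_err("poison fifo full, message dropped\n");

	/* consumer: kfifo_get() returns 0 when the fifo is empty */
	struct ras_poison_msg out;

	if (kfifo_get(&msg_fifo, &out))
		process(&out);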
+static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
+{
+ mutex_init(&ecc_log->lock);
+
+ /* Any constant value will do as the siphash key */
+ memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key));
+
+ INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
+ ecc_log->de_updated = false;
+}
+
+static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ struct ras_ecc_err *ecc_err;
+
+ mutex_lock(&ecc_log->lock);
+ radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
+ ecc_err = radix_tree_deref_slot(slot);
+ kfree(ecc_err->err_pages.pfn);
+ kfree(ecc_err);
+ radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
+ }
+ mutex_unlock(&ecc_log->lock);
+
+ mutex_destroy(&ecc_log->lock);
+ ecc_log->de_updated = false;
+}
+
+static void amdgpu_ras_do_page_retirement(struct work_struct *work)
+{
+ struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
+ page_retirement_dwork.work);
+ struct amdgpu_device *adev = con->adev;
+ struct ras_err_data err_data;
+
+ if (amdgpu_in_reset(adev) || atomic_read(&con->in_recovery))
+ return;
+
+ amdgpu_ras_error_data_init(&err_data);
+
+ amdgpu_umc_handle_bad_pages(adev, &err_data);
+
+ amdgpu_ras_error_data_fini(&err_data);
+
+ mutex_lock(&con->umc_ecc_log.lock);
+ if (radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
+ UMC_ECC_NEW_DETECTED_TAG))
+ schedule_delayed_work(&con->page_retirement_dwork,
+ msecs_to_jiffies(AMDGPU_RAS_RETIRE_PAGE_INTERVAL));
+ mutex_unlock(&con->umc_ecc_log.lock);
+}
+
+static int amdgpu_ras_query_ecc_status(struct amdgpu_device *adev,
+ enum amdgpu_ras_block ras_block, uint32_t timeout_ms)
+{
+ int ret = 0;
+ struct ras_ecc_log_info *ecc_log;
+ struct ras_query_if info;
+ uint32_t timeout = timeout_ms;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ memset(&info, 0, sizeof(info));
+ info.head.block = ras_block;
+
+ ecc_log = &ras->umc_ecc_log;
+ ecc_log->de_updated = false;
+ do {
+ ret = amdgpu_ras_query_error_status(adev, &info);
+ if (ret) {
+ dev_err(adev->dev, "Failed to query ras error! ret:%d\n", ret);
+ return ret;
+ }
+
+ if (timeout && !ecc_log->de_updated) {
+ msleep(1);
+ timeout--;
+ }
+ } while (timeout && !ecc_log->de_updated);
+
+ if (timeout_ms && !timeout) {
+ dev_warn(adev->dev, "Can't find deferred error\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
+ uint32_t timeout)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret;
+
+ ret = amdgpu_ras_query_ecc_status(adev, AMDGPU_RAS_BLOCK__UMC, timeout);
+ if (!ret)
+ schedule_delayed_work(&con->page_retirement_dwork, 0);
+}
+
+static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
+ struct ras_poison_msg *poison_msg)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ uint32_t reset = poison_msg->reset;
+ uint16_t pasid = poison_msg->pasid;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ if (poison_msg->pasid_fn)
+ poison_msg->pasid_fn(adev, pasid, poison_msg->data);
+
+ if (reset) {
+ flush_delayed_work(&con->page_retirement_dwork);
+
+ con->gpu_reset_flags |= reset;
+ amdgpu_ras_reset_gpu(adev);
+ }
+
+ return 0;
+}
+
static int amdgpu_ras_page_retirement_thread(void *param)
{
struct amdgpu_device *adev = (struct amdgpu_device *)param;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_poison_msg poison_msg;
+ enum amdgpu_ras_block ras_block;
+ bool poison_creation_is_handled = false;
while (!kthread_should_stop()) {
@@ -2688,13 +2889,34 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
- dev_info(adev->dev, "Start processing page retirement. request:%d\n",
- atomic_read(&con->page_retirement_req_cnt));
-
atomic_dec(&con->page_retirement_req_cnt);
- amdgpu_umc_bad_page_polling_timeout(adev,
- false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
+ if (!amdgpu_ras_get_poison_req(adev, &poison_msg))
+ continue;
+
+ ras_block = poison_msg.block;
+
+ dev_info(adev->dev, "Start processing ras block %s(%d)\n",
+ ras_block_str(ras_block), ras_block);
+
+ if (ras_block == AMDGPU_RAS_BLOCK__UMC) {
+ amdgpu_ras_poison_creation_handler(adev,
+ MAX_UMC_POISON_POLLING_TIME_ASYNC);
+ poison_creation_is_handled = true;
+ } else {
+ /* poison_creation_is_handled:
+ * false: this consumption interrupt arrived without a prior
+ * poison creation interrupt, so poll for deferred errors first.
+ * true: a poison creation interrupt was already handled, so
+ * skip the polling and consume the message directly.
+ */
+ amdgpu_ras_poison_creation_handler(adev,
+ poison_creation_is_handled ?
+ 0 : MAX_UMC_POISON_POLLING_TIME_ASYNC);
+
+ amdgpu_ras_poison_consumption_handler(adev, &poison_msg);
+ poison_creation_is_handled = false;
+ }
}
return 0;
@@ -2763,6 +2985,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
}
}
+ mutex_init(&con->page_rsv_lock);
+ INIT_KFIFO(con->poison_fifo);
mutex_init(&con->page_retirement_lock);
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
@@ -2773,6 +2997,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
}
+ INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
+ amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
#ifdef CONFIG_X86_MCE_AMD
if ((adev->asic_type == CHIP_ALDEBARAN) &&
(adev->gmc.xgmi.connected_to_cpu))
@@ -2813,8 +3039,14 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
atomic_set(&con->page_retirement_req_cnt, 0);
+ mutex_destroy(&con->page_rsv_lock);
+
cancel_work_sync(&con->recovery_work);
+ cancel_delayed_work_sync(&con->page_retirement_dwork);
+
+ amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
+
mutex_lock(&con->recovery_lock);
con->eh_data = NULL;
kfree(data->bps);
@@ -3036,6 +3268,35 @@ static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
AMDGPU_RAS_ERROR__PARITY;
}
+static void ras_event_mgr_init(struct ras_event_manager *mgr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mgr->seqnos); i++)
+ atomic64_set(&mgr->seqnos[i], 0);
+}
+
+static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_hive_info *hive;
+
+ if (!ras)
+ return;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
+
+ /* init event manager with node 0 on xgmi system */
+ if (!amdgpu_in_reset(adev)) {
+ if (!hive || adev->gmc.xgmi.node_id == 0)
+ ras_event_mgr_init(ras->event_mgr);
+ }
+
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+}
+
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -3356,6 +3617,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ amdgpu_ras_event_mgr_init(adev);
+
if (amdgpu_aca_is_enabled(adev)) {
if (amdgpu_in_reset(adev))
r = amdgpu_aca_reset(adev);
@@ -3472,14 +3735,39 @@ void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
atomic_set(&ras->fed, !!status);
}
+bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id)
+{
+ return !(id & BIT_ULL(63));
+}
+
+u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ u64 id;
+
+ switch (type) {
+ case RAS_EVENT_TYPE_ISR:
+ id = (u64)atomic64_read(&ras->event_mgr->seqnos[type]);
+ break;
+ case RAS_EVENT_TYPE_INVALID:
+ default:
+ id = BIT_ULL(63) | 0ULL;
+ break;
+ }
+
+ return id;
+}
+
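Bit 63 doubles as the invalid marker: amdgpu_ras_acquire_event_id() hands out the current ISR sequence number for RAS_EVENT_TYPE_ISR and BIT_ULL(63) for everything else, which amdgpu_ras_event_id_is_valid() then rejects. Sketch:

	u64 id = amdgpu_ras_acquire_event_id(adev, RAS_EVENT_TYPE_INVALID);

	/* bit 63 is set, so RAS_EVENT_LOG() (see amdgpu_ras.h below)
	 * falls back to an untagged dev_info() for this id
	 */
	WARN_ON(amdgpu_ras_event_id_is_valid(adev, id));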
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ u64 event_id = (u64)atomic64_inc_return(&ras->event_mgr->seqnos[RAS_EVENT_TYPE_ISR]);
- dev_info(adev->dev, "uncorrectable hardware error"
- "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
+ RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
+ "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
+ amdgpu_ras_set_fed(adev, true);
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
amdgpu_ras_reset_gpu(adev);
}
@@ -3998,6 +4286,8 @@ void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_a
{
struct ras_err_addr *mca_err_addr;
+ /* This function will be retired. */
+ return;
mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL);
if (!mca_err_addr)
return;
@@ -4195,3 +4485,19 @@ void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
amdgpu_ras_boot_time_error_reporting(adev, i, boot_error);
}
}
+
+int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
+ int ret = 0;
+
+ mutex_lock(&con->page_rsv_lock);
+ ret = amdgpu_vram_mgr_query_page_status(mgr, start);
+ if (ret == -ENOENT)
+ ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
+ mutex_unlock(&con->page_rsv_lock);
+
+ return ret;
+}
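amdgpu_ras_reserve_page() takes a GPU page frame number, converts it to a byte offset with AMDGPU_GPU_PAGE_SHIFT, and only reserves the range when it is not already tracked; page_rsv_lock serializes the query-then-reserve so two reporters cannot double-reserve the same page. A sketch of a caller checking the result (the add-bad-pages path above ignores it):

	/* retired_page is a pfn, not a byte address */
	if (amdgpu_ras_reserve_page(adev, bps[i].retired_page))
		dev_warn(adev->dev, "bad page 0x%llx not reserved\n",
			 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);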
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index e0f8ce9d8440..c8980d5f6540 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -26,6 +26,9 @@
#include <linux/debugfs.h>
#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <linux/radix-tree.h>
+#include <linux/siphash.h>
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
#include "amdgpu_smuio.h"
@@ -64,6 +67,14 @@ struct amdgpu_iv_entry;
/* The high three bits indicates socketid */
#define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
+#define RAS_EVENT_LOG(_adev, _id, _fmt, ...) \
+do { \
+ if (amdgpu_ras_event_id_is_valid((_adev), (_id))) \
+ dev_info((_adev)->dev, "{%llu}" _fmt, (_id), ##__VA_ARGS__); \
+ else \
+ dev_info((_adev)->dev, _fmt, ##__VA_ARGS__); \
+} while (0)
+
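RAS_EVENT_LOG() prefixes a valid event id in braces so every message produced by one query can be correlated in the log; invalid ids degrade to a plain dev_info(). For example, the converted correctable-error report above effectively expands to:

	if (amdgpu_ras_event_id_is_valid(adev, event_id))
		dev_info(adev->dev, "{%llu}%ld correctable hardware errors detected in %s block\n",
			 event_id, ras_mgr->err_data.ce_count, blk_name);
	else
		dev_info(adev->dev, "%ld correctable hardware errors detected in %s block\n",
			 ras_mgr->err_data.ce_count, blk_name);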
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
AMDGPU_RAS_BLOCK__SDMA,
@@ -419,6 +430,52 @@ struct umc_ecc_info {
int record_ce_addr_supported;
};
+enum ras_event_type {
+ RAS_EVENT_TYPE_INVALID = -1,
+ RAS_EVENT_TYPE_ISR = 0,
+ RAS_EVENT_TYPE_COUNT,
+};
+
+struct ras_event_manager {
+ atomic64_t seqnos[RAS_EVENT_TYPE_COUNT];
+};
+
+struct ras_query_context {
+ enum ras_event_type type;
+ u64 event_id;
+};
+
+typedef int (*pasid_notify)(struct amdgpu_device *adev,
+ uint16_t pasid, void *data);
+
+struct ras_poison_msg {
+ enum amdgpu_ras_block block;
+ uint16_t pasid;
+ uint32_t reset;
+ pasid_notify pasid_fn;
+ void *data;
+};
+
+struct ras_err_pages {
+ uint32_t count;
+ uint64_t *pfn;
+};
+
+struct ras_ecc_err {
+ u64 hash_index;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+ struct ras_err_pages err_pages;
+};
+
+struct ras_ecc_log_info {
+ struct mutex lock;
+ siphash_key_t ecc_key;
+ struct radix_tree_root de_page_tree;
+ bool de_updated;
+};
+
struct amdgpu_ras {
/* ras infrastructure */
/* for ras itself. */
@@ -477,8 +534,18 @@ struct amdgpu_ras {
wait_queue_head_t page_retirement_wq;
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
+ struct mutex page_rsv_lock;
+ DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
+ struct ras_ecc_log_info umc_ecc_log;
+ struct delayed_work page_retirement_dwork;
+
/* Fatal error detected flag */
atomic_t fed;
+
+ /* RAS event manager */
+ struct ras_event_manager __event_mgr;
+ struct ras_event_manager *event_mgr;
+
};
struct ras_fs_data {
@@ -512,6 +579,7 @@ struct ras_err_data {
unsigned long de_count;
unsigned long err_addr_cnt;
struct eeprom_table_record *err_addr;
+ unsigned long err_addr_len;
u32 err_list_count;
struct list_head err_node_list;
};
@@ -879,4 +947,13 @@ void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
+bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id);
+u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type);
+
+int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+
+int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index b12808c0c331..06a62a8a992e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -404,6 +404,22 @@ static int amdgpu_ras_eeprom_correct_header_tag(
return res;
}
+static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+
+ switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
+ case IP_VERSION(8, 10, 0):
+ case IP_VERSION(12, 0, 0):
+ hdr->version = RAS_TABLE_VER_V2_1;
+ return;
+ default:
+ hdr->version = RAS_TABLE_VER_V1;
+ return;
+ }
+}
+
/**
* amdgpu_ras_eeprom_reset_table -- Reset the RAS EEPROM table
* @control: pointer to control structure
@@ -423,11 +439,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
mutex_lock(&control->ras_tbl_mutex);
hdr->header = RAS_TABLE_HDR_VAL;
- if (adev->umc.ras &&
- adev->umc.ras->set_eeprom_table_version)
- adev->umc.ras->set_eeprom_table_version(hdr);
- else
- hdr->version = RAS_TABLE_VER_V1;
+ amdgpu_ras_set_eeprom_table_version(control);
if (hdr->version == RAS_TABLE_VER_V2_1) {
hdr->first_rec_offset = RAS_RECORD_START_V2_1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 381101d2bf05..50fcd86e1033 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -164,4 +164,29 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
}
}
+/**
+ * amdgpu_res_cleared - check if blocks are cleared
+ *
+ * @cur: the cursor to extract the block
+ *
+ * Check if the @cur block is cleared
+ */
+static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur)
+{
+ struct drm_buddy_block *block;
+
+ switch (cur->mem_type) {
+ case TTM_PL_VRAM:
+ block = cur->node;
+
+ if (!amdgpu_vram_mgr_is_cleared(block))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
#endif
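
amdgpu_res_cleared() is intended to be queried once per cursor step while walking a resource; the clear-on-free path added to amdgpu_ttm.c below does exactly that. As an illustrative, non-driver use of the cursor API above, here is a hedged sketch that totals the bytes of a resource still needing a clear (the helper name is hypothetical):

/* Assumes amdgpu_res_cursor.h is included */
static inline u64 amdgpu_res_dirty_bytes(struct ttm_resource *res, u64 size)
{
	struct amdgpu_res_cursor cursor;
	u64 dirty = 0;

	amdgpu_res_first(res, 0, size, &cursor);
	while (cursor.remaining) {
		if (!amdgpu_res_cleared(&cursor))
			dirty += cursor.size;
		amdgpu_res_next(&cursor, cursor.size);
	}
	return dirty;
}
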
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 147100c27c2d..ea4873f6ccd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -21,9 +21,6 @@
*
*/
-#include <linux/devcoredump.h>
-#include <generated/utsrelease.h>
-
#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"
@@ -161,105 +158,3 @@ void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
atomic_set(&reset_domain->in_gpu_reset, 0);
up_write(&reset_domain->sem);
}
-
-#ifndef CONFIG_DEV_COREDUMP
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
-{
-}
-#else
-static ssize_t
-amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen)
-{
- struct drm_printer p;
- struct amdgpu_coredump_info *coredump = data;
- struct drm_print_iterator iter;
- int i;
-
- iter.data = buffer;
- iter.offset = 0;
- iter.start = offset;
- iter.remain = count;
-
- p = drm_coredump_printer(&iter);
-
- drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
- drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
- drm_printf(&p, "kernel: " UTS_RELEASE "\n");
- drm_printf(&p, "module: " KBUILD_MODNAME "\n");
- drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
- coredump->reset_time.tv_nsec);
-
- if (coredump->reset_task_info.pid)
- drm_printf(&p, "process_name: %s PID: %d\n",
- coredump->reset_task_info.process_name,
- coredump->reset_task_info.pid);
-
- if (coredump->ring) {
- drm_printf(&p, "\nRing timed out details\n");
- drm_printf(&p, "IP Type: %d Ring Name: %s\n",
- coredump->ring->funcs->type,
- coredump->ring->name);
- }
-
- if (coredump->reset_vram_lost)
- drm_printf(&p, "VRAM is lost due to GPU reset!\n");
- if (coredump->adev->reset_info.num_regs) {
- drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
-
- for (i = 0; i < coredump->adev->reset_info.num_regs; i++)
- drm_printf(&p, "0x%08x: 0x%08x\n",
- coredump->adev->reset_info.reset_dump_reg_list[i],
- coredump->adev->reset_info.reset_dump_reg_value[i]);
- }
-
- return count - iter.remain;
-}
-
-static void amdgpu_devcoredump_free(void *data)
-{
- kfree(data);
-}
-
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
-{
- struct amdgpu_coredump_info *coredump;
- struct drm_device *dev = adev_to_drm(adev);
- struct amdgpu_job *job = reset_context->job;
- struct drm_sched_job *s_job;
-
- coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
-
- if (!coredump) {
- DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
- return;
- }
-
- coredump->reset_vram_lost = vram_lost;
-
- if (reset_context->job && reset_context->job->vm) {
- struct amdgpu_task_info *ti;
- struct amdgpu_vm *vm = reset_context->job->vm;
-
- ti = amdgpu_vm_get_task_info_vm(vm);
- if (ti) {
- coredump->reset_task_info = *ti;
- amdgpu_vm_put_task_info(ti);
- }
- }
-
- if (job) {
- s_job = &job->base;
- coredump->ring = to_amdgpu_ring(s_job->sched);
- }
-
- coredump->adev = adev;
-
- ktime_get_ts64(&coredump->reset_time);
-
- dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
- amdgpu_devcoredump_read, amdgpu_devcoredump_free);
-}
-#endif
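
The devcoredump plumbing removed above is relocated rather than lost; it reappears in a dedicated coredump file elsewhere in this series. For reference, the dev_coredumpm() contract it builds on is small: hand the core a data blob plus read and free callbacks, after which devcoredump owns the allocation. A minimal hedged sketch with made-up names:

#include <linux/devcoredump.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct my_dump {
	char msg[64];
};

static ssize_t my_dump_read(char *buffer, loff_t offset, size_t count,
			    void *data, size_t datalen)
{
	struct my_dump *d = data;

	return memory_read_from_buffer(buffer, count, &offset,
				       d->msg, sizeof(d->msg));
}

static void my_dump_free(void *data)
{
	kfree(data);
}

static void my_report_error(struct device *dev)
{
	struct my_dump *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return;
	snprintf(d->msg, sizeof(d->msg), "device wedged");
	/* devcoredump takes ownership of @d and frees it via my_dump_free() */
	dev_coredumpm(dev, THIS_MODULE, d, 0, GFP_NOWAIT,
		      my_dump_read, my_dump_free);
}
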
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 60522963aaca..b11d190ece53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -32,6 +32,7 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
+ AMDGPU_SKIP_COREDUMP = 2,
};
struct amdgpu_reset_context {
@@ -88,19 +89,6 @@ struct amdgpu_reset_domain {
atomic_t reset_res;
};
-#ifdef CONFIG_DEV_COREDUMP
-
-#define AMDGPU_COREDUMP_VERSION "1"
-
-struct amdgpu_coredump_info {
- struct amdgpu_device *adev;
- struct amdgpu_task_info reset_task_info;
- struct timespec64 reset_time;
- bool reset_vram_lost;
- struct amdgpu_ring *ring;
-};
-#endif
-
int amdgpu_reset_init(struct amdgpu_device *adev);
int amdgpu_reset_fini(struct amdgpu_device *adev);
@@ -141,9 +129,6 @@ void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);
void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context);
-
#define for_each_handler(i, handler, reset_ctl) \
for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \
(handler = (*reset_ctl->reset_handlers)[i]); \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5505d646f43a..06f0a6534a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -524,46 +524,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
volatile u32 *mqd;
- int r;
+ u32 *kbuf;
+ int r, i;
uint32_t value, result;
if (*pos & 3 || size & 3)
return -EINVAL;
- result = 0;
+ kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
- return r;
+ goto err_free;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
- if (r) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
+ if (r)
+ goto err_unreserve;
+ /*
+ * Copy to local buffer to avoid put_user(), which might fault
+ * and acquire mmap_sem, under reservation_ww_class_mutex.
+ */
+ for (i = 0; i < ring->mqd_size / sizeof(u32); i++)
+ kbuf[i] = mqd[i];
+
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ amdgpu_bo_unreserve(ring->mqd_obj);
+
+ result = 0;
while (size) {
if (*pos >= ring->mqd_size)
- goto done;
+ break;
- value = mqd[*pos/4];
+ value = kbuf[*pos/4];
r = put_user(value, (uint32_t *)buf);
if (r)
- goto done;
+ goto err_free;
buf += 4;
result += 4;
size -= 4;
*pos += 4;
}
-done:
- amdgpu_bo_kunmap(ring->mqd_obj);
- mqd = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
- if (r)
- return r;
-
+ kfree(kbuf);
return result;
+
+err_unreserve:
+ amdgpu_bo_unreserve(ring->mqd_obj);
+err_free:
+ kfree(kbuf);
+ return r;
}
static const struct file_operations amdgpu_debugfs_mqd_fops = {
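
The rewrite above fixes a lock-ordering hazard: put_user() may fault and take mm locks while the BO reservation, a ww_mutex, is still held. The general shape of the fix, snapshot under the lock and copy to userspace afterwards, in a hedged, driver-agnostic sketch (all names illustrative; @lock stands in for the reservation):

#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t snapshot_read(char __user *buf, size_t size, loff_t *pos,
			     const void *src, size_t src_size,
			     struct mutex *lock)
{
	ssize_t ret;
	void *kbuf;

	if (*pos >= src_size)
		return 0;
	size = min_t(size_t, size, src_size - *pos);

	kbuf = kmalloc(size, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(lock);
	memcpy(kbuf, src + *pos, size);	/* cannot fault */
	mutex_unlock(lock);

	ret = copy_to_user(buf, kbuf, size) ? -EFAULT : size;
	if (ret > 0)
		*pos += ret;
	kfree(kbuf);
	return ret;
}
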
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 173a2a308078..b51a82e711df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -132,7 +132,7 @@ struct amdgpu_buffer_funcs {
uint64_t dst_offset,
/* number of byte to transfer */
uint32_t byte_count,
- bool tmz);
+ uint32_t copy_flags);
/* maximum bytes in a single operation */
uint32_t fill_max_bytes;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
index ff4435181055..ec9d12f85f39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
@@ -44,6 +44,7 @@ struct amdgpu_smuio_funcs {
u32 (*get_socket_id)(struct amdgpu_device *adev);
enum amdgpu_pkg_type (*get_pkg_type)(struct amdgpu_device *adev);
bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev);
+ u64 (*get_gpu_clock_counter)(struct amdgpu_device *adev);
};
struct amdgpu_smuio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index fc418e670fda..3749892bf702 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- amdgpu_bo_in_cpu_visible_vram(abo)) {
+ amdgpu_res_cpu_visible(adev, bo->resource)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@@ -236,7 +236,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
- dst_addr, num_bytes, false);
+ dst_addr, num_bytes, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
@@ -296,6 +296,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct dma_fence *fence = NULL;
int r = 0;
+ uint32_t copy_flags = 0;
+
if (!adev->mman.buffer_funcs_enabled) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
@@ -323,8 +325,11 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
if (r)
goto error;
- r = amdgpu_copy_buffer(ring, from, to, cur_size,
- resv, &next, false, true, tmz);
+ if (tmz)
+ copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
+
+ r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
+ &next, false, true, copy_flags);
if (r)
goto error;
@@ -378,11 +383,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
struct dma_fence *wipe_fence = NULL;
- r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence,
- false);
+ r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
+ false);
if (r) {
goto error;
} else if (wipe_fence) {
+ amdgpu_vram_mgr_set_cleared(bo->resource);
dma_fence_put(fence);
fence = wipe_fence;
}
@@ -403,40 +409,55 @@ error:
return r;
}
-/*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
*
- * Called by amdgpu_bo_move()
+ * Returns: true if the full resource is CPU visible, false otherwise.
*/
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
- struct ttm_resource *mem)
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res)
{
- u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
- u64 end;
- if (mem->mem_type == TTM_PL_SYSTEM ||
- mem->mem_type == TTM_PL_TT)
+ if (!res)
+ return false;
+
+ if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+ res->mem_type == AMDGPU_PL_PREEMPT)
return true;
- if (mem->mem_type != TTM_PL_VRAM)
+
+ if (res->mem_type != TTM_PL_VRAM)
return false;
- amdgpu_res_first(mem, 0, mem_size, &cursor);
- end = cursor.start + cursor.size;
+ amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
+ if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
+ return false;
amdgpu_res_next(&cursor, cursor.size);
+ }
- if (!cursor.remaining)
- break;
+ return true;
+}
- /* ttm_resource_ioremap only supports contiguous memory */
- if (end != cursor.start)
- return false;
+/*
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
+ *
+ * Called by amdgpu_bo_move()
+ */
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+ struct ttm_resource *mem)
+{
+ if (!amdgpu_res_cpu_visible(adev, mem))
+ return false;
- end = cursor.start + cursor.size;
- }
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (mem->mem_type == TTM_PL_VRAM &&
+ !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+ return false;
- return end <= adev->gmc.visible_vram_size;
+ return true;
}
/*
@@ -529,8 +550,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
/* Check that all memory is CPU accessible */
- if (!amdgpu_mem_visible(adev, old_mem) ||
- !amdgpu_mem_visible(adev, new_mem)) {
+ if (!amdgpu_res_copyable(adev, old_mem) ||
+ !amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}
@@ -557,7 +578,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@@ -568,9 +588,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- /* check if it's visible */
- if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
- return -EINVAL;
if (adev->mman.aper_base_kaddr &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)
@@ -1477,7 +1494,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
swap(src_addr, dst_addr);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
- PAGE_SIZE, false);
+ PAGE_SIZE, 0);
amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
@@ -2128,7 +2145,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush, bool tmz)
+ bool vm_needs_flush, uint32_t copy_flags)
{
struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
@@ -2154,8 +2171,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
- dst_offset, cur_size_in_bytes, tmz);
-
+ dst_offset, cur_size_in_bytes, copy_flags);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
@@ -2215,6 +2231,71 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
return 0;
}
+/**
+ * amdgpu_ttm_clear_buffer - clear memory buffers
+ * @bo: amdgpu buffer object
+ * @resv: reservation object
+ * @fence: dma_fence associated with the operation
+ *
+ * Clear the memory buffer resource.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+ struct dma_resv *resv,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_res_cursor cursor;
+ u64 addr;
+ int r = 0;
+
+ if (!adev->mman.buffer_funcs_enabled)
+ return -EINVAL;
+
+ if (!fence)
+ return -EINVAL;
+
+ *fence = dma_fence_get_stub();
+
+ amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
+
+ mutex_lock(&adev->mman.gtt_window_lock);
+ while (cursor.remaining) {
+ struct dma_fence *next = NULL;
+ u64 size;
+
+ if (amdgpu_res_cleared(&cursor)) {
+ amdgpu_res_next(&cursor, cursor.size);
+ continue;
+ }
+
+ /* Never clear more than 256MiB at once to avoid timeouts */
+ size = min(cursor.size, 256ULL << 20);
+
+ r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
+ 1, ring, false, &size, &addr);
+ if (r)
+ goto err;
+
+ r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
+ &next, true, true);
+ if (r)
+ goto err;
+
+ dma_fence_put(*fence);
+ *fence = next;
+
+ amdgpu_res_next(&cursor, size);
+ }
+err:
+ mutex_unlock(&adev->mman.gtt_window_lock);
+
+ return r;
+}
+
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
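
A hedged usage sketch for the new amdgpu_ttm_clear_buffer() helper: clear a BO's backing store and publish the resulting fence on its reservation object. The helper name and the DMA_RESV_USAGE_KERNEL choice are assumptions about a plausible caller, not code from this series.

/* Assumes the BO's reservation lock is held by the caller */
static int clear_and_publish(struct amdgpu_bo *bo)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_fence *fence = NULL;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r)
		return r;

	r = amdgpu_ttm_clear_buffer(bo, resv, &fence);
	if (r)
		return r;

	/* Order later kernel access of the BO after the clear */
	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_KERNEL);
	dma_fence_put(fence);
	return 0;
}
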
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 65ec82141a8e..b6f53129dea3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -38,8 +38,6 @@
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
-#define AMDGPU_POISON 0xd0bed0be
-
extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
@@ -111,6 +109,8 @@ struct amdgpu_copy_mem {
unsigned long offset;
};
+#define AMDGPU_COPY_FLAGS_TMZ (1 << 0)
+
int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_preempt_mgr_init(struct amdgpu_device *adev);
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res);
+
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
@@ -148,13 +151,16 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush, bool tmz);
+ bool vm_needs_flush, uint32_t copy_flags);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
const struct amdgpu_copy_mem *src,
const struct amdgpu_copy_mem *dst,
uint64_t size, bool tmz,
struct dma_resv *resv,
struct dma_fence **f);
+int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+ struct dma_resv *resv,
+ struct dma_fence **fence);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
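
The bool-to-bitmask change above (tmz becomes copy_flags) leaves headroom for more per-copy attributes than TMZ. Composing the word at a call site reduces to a sketch like this; AMDGPU_COPY_FLAGS_TMZ is the only flag defined in this hunk, so anything beyond it would be hypothetical:

static u32 build_copy_flags(bool tmz)
{
	u32 copy_flags = 0;

	if (tmz)
		copy_flags |= AMDGPU_COPY_FLAGS_TMZ;

	return copy_flags;
}
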
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 619445760037..105d4de0613a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -125,6 +125,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_INTF_DRV,
PSP_FW_TYPE_PSP_DBG_DRV,
PSP_FW_TYPE_PSP_RAS_DRV,
+ PSP_FW_TYPE_PSP_IPKEYMGR_DRV,
PSP_FW_TYPE_MAX_INDEX,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 20436f81856a..540e0f066b26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -21,10 +21,13 @@
*
*/
+#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"
#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms
+#define MAX_UMC_HASH_STRING_SIZE 256
+
static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data, uint64_t err_addr,
uint32_t ch_inst, uint32_t umc_inst)
@@ -63,6 +66,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
goto out_fini_err_data;
}
+ err_data.err_addr_len = adev->umc.max_ras_err_cnt_per_query;
+
/*
* Translate UMC channel address to Physical address
*/
@@ -86,7 +91,7 @@ out_fini_err_data:
return ret;
}
-static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -118,6 +123,8 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if(!err_data->err_addr)
dev_warn(adev->dev, "Failed to alloc memory for "
"umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
/* umc query_ras_error_address is also responsible for clearing
* error status
@@ -143,6 +150,8 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if(!err_data->err_addr)
dev_warn(adev->dev, "Failed to alloc memory for "
"umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
/* umc query_ras_error_address is also responsible for clearing
* error status
@@ -170,6 +179,7 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
}
kfree(err_data->err_addr);
+ err_data->err_addr = NULL;
mutex_unlock(&con->page_retirement_lock);
}
@@ -177,7 +187,7 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
void *ras_error_status,
struct amdgpu_iv_entry *entry,
- bool reset)
+ uint32_t reset)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -186,9 +196,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
amdgpu_umc_handle_bad_pages(adev, ras_error_status);
if (err_data->ue_count && reset) {
- /* use mode-2 reset for poison consumption */
- if (!entry)
- con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
@@ -196,7 +204,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
}
int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- bool reset, uint32_t timeout_ms)
+ uint32_t reset, uint32_t timeout_ms)
{
struct ras_err_data err_data;
struct ras_common_if head = {
@@ -238,16 +246,16 @@ int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
if (reset) {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- /* use mode-2 reset for poison consumption */
- con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
return 0;
}
-int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset)
+int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset)
{
int ret = AMDGPU_RAS_SUCCESS;
@@ -285,16 +293,14 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
amdgpu_ras_error_data_fini(&err_data);
} else {
- if (reset) {
- amdgpu_umc_bad_page_polling_timeout(adev,
- reset, MAX_UMC_POISON_POLLING_TIME_SYNC);
- } else {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ amdgpu_ras_put_poison_req(adev,
+ block, pasid, pasid_fn, data, reset);
+
atomic_inc(&con->page_retirement_req_cnt);
wake_up(&con->page_retirement_wq);
- }
}
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
@@ -307,11 +313,19 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint32_t reset)
+{
+ return amdgpu_umc_pasid_poison_handler(adev,
+ block, 0, NULL, NULL, reset);
+}
+
int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
void *ras_error_status,
struct amdgpu_iv_entry *entry)
{
- return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
+ return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry,
+ AMDGPU_RAS_GPU_RESET_MODE1_RESET);
}
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
@@ -388,14 +402,20 @@ int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
uint64_t err_addr,
uint64_t retired_page,
uint32_t channel_index,
uint32_t umc_inst)
{
- struct eeprom_table_record *err_rec =
- &err_data->err_addr[err_data->err_addr_cnt];
+ struct eeprom_table_record *err_rec;
+
+ if (!err_data ||
+ !err_data->err_addr ||
+ (err_data->err_addr_cnt >= err_data->err_addr_len))
+ return -EINVAL;
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
err_rec->address = err_addr;
/* page frame address is saved */
@@ -407,6 +427,8 @@ void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
err_rec->mcumc_id = umc_inst;
err_data->err_addr_cnt++;
+
+ return 0;
}
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
@@ -439,3 +461,76 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
return 0;
}
+
+int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr)
+{
+ if (adev->umc.ras->update_ecc_status)
+ return adev->umc.ras->update_ecc_status(adev,
+ status, ipid, addr);
+ return 0;
+}
+
+static int amdgpu_umc_uint64_cmp(const void *a, const void *b)
+{
+ uint64_t *addr_a = (uint64_t *)a;
+ uint64_t *addr_b = (uint64_t *)b;
+
+ if (*addr_a > *addr_b)
+ return 1;
+ else if (*addr_a < *addr_b)
+ return -1;
+ else
+ return 0;
+}
+
+/* Use string hash to avoid logging the same bad pages repeatedly */
+int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
+ uint64_t *pfns, int len, uint64_t *val)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ char buf[MAX_UMC_HASH_STRING_SIZE] = {0};
+ int offset = 0, i = 0;
+ uint64_t hash_val;
+
+ if (!pfns || !len)
+ return -EINVAL;
+
+ sort(pfns, len, sizeof(uint64_t), amdgpu_umc_uint64_cmp, NULL);
+
+ for (i = 0; i < len; i++)
+ offset += scnprintf(&buf[offset], sizeof(buf) - offset, "%llx", pfns[i]);
+
+ hash_val = siphash(buf, offset, &con->umc_ecc_log.ecc_key);
+
+ *val = hash_val;
+
+ return 0;
+}
+
+int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
+ struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_ecc_log_info *ecc_log;
+ int ret;
+
+ ecc_log = &con->umc_ecc_log;
+
+ mutex_lock(&ecc_log->lock);
+ ret = radix_tree_insert(ecc_tree, ecc_err->hash_index, ecc_err);
+ if (!ret) {
+ struct ras_err_pages *err_pages = &ecc_err->err_pages;
+ int i;
+
+ /* Reserve memory */
+ for (i = 0; i < err_pages->count; i++)
+ amdgpu_ras_reserve_page(adev, err_pages->pfn[i]);
+
+ radix_tree_tag_set(ecc_tree,
+ ecc_err->hash_index, UMC_ECC_NEW_DETECTED_TAG);
+ }
+ mutex_unlock(&ecc_log->lock);
+
+ return ret;
+}
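
amdgpu_umc_logs_ecc_err() above tags freshly inserted entries with UMC_ECC_NEW_DETECTED_TAG, which lets a later pass visit only the new errors. A hedged sketch of such a drain loop; the processing step is left abstract and the helper name is made up:

static void drain_new_ecc_errs(struct radix_tree_root *tree,
			       struct mutex *lock)
{
	struct ras_ecc_err *batch[16];
	unsigned long index = 0;
	unsigned int i, n;

	do {
		mutex_lock(lock);
		n = radix_tree_gang_lookup_tag(tree, (void **)batch, index,
					       ARRAY_SIZE(batch),
					       UMC_ECC_NEW_DETECTED_TAG);
		for (i = 0; i < n; i++) {
			/* handle batch[i], then retire its tag */
			radix_tree_tag_clear(tree, batch[i]->hash_index,
					     UMC_ECC_NEW_DETECTED_TAG);
			index = batch[i]->hash_index + 1;
		}
		mutex_unlock(lock);
	} while (n);
}
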
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 26d2ae498daf..5f50c69c3cec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -52,6 +52,8 @@
#define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))
+/* Page retirement tag */
+#define UMC_ECC_NEW_DETECTED_TAG 0x1
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -66,8 +68,8 @@ struct amdgpu_umc_ras {
void *ras_error_status);
bool (*check_ecc_err_status)(struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, void *ras_error_status);
- /* support different eeprom table version for different asic */
- void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr);
+ int (*update_ecc_status)(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr);
};
struct amdgpu_umc_funcs {
@@ -103,11 +105,14 @@ struct amdgpu_umc {
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset);
+ enum amdgpu_ras_block block, uint32_t reset);
+int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
uint64_t err_addr,
uint64_t retired_page,
uint32_t channel_index,
@@ -123,5 +128,15 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data);
int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- bool reset, uint32_t timeout_ms);
+ uint32_t reset, uint32_t timeout_ms);
+
+int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr);
+int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
+ uint64_t *pfns, int len, uint64_t *val);
+int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
+ struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err);
+
+void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+ void *ras_error_status);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index ab820cf52668..e01c1c8e64c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -189,10 +189,13 @@ static void setup_vpe_queue(struct amdgpu_device *adev,
mqd->rptr_val = 0;
mqd->unmapped = 1;
+ if (adev->vpe.collaborate_mode)
+ memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));
+
qinfo->mqd_addr = test->mqd_data_gpu_addr;
qinfo->csa_addr = test->ctx_data_gpu_addr +
offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
- qinfo->doorbell_offset_0 = (adev->doorbell_index.vpe_ring + 1) << 1;
+ qinfo->doorbell_offset_0 = 0;
qinfo->doorbell_offset_1 = 0;
}
@@ -287,7 +290,10 @@ static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *te
ring[5] = 0;
mqd->wptr_val = (6 << 2);
- // WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
+ if (adev->vpe.collaborate_mode)
+ (++mqd)->wptr_val = (6 << 2);
+
+ WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
for (i = 0; i < adev->usec_timeout; i++) {
if (*fence == test_pattern)
@@ -571,6 +577,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
fw_name = "amdgpu/umsch_mm_4_0_0.bin";
break;
default:
@@ -750,6 +757,7 @@ static int umsch_mm_early_init(void *handle)
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
umsch_mm_v4_0_set_funcs(&adev->umsch_mm);
break;
default:
@@ -766,6 +774,9 @@ static int umsch_mm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
+ return 0;
+
return umsch_mm_test(adev);
}
@@ -867,6 +878,8 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
.hw_fini = umsch_mm_hw_fini,
.suspend = umsch_mm_suspend,
.resume = umsch_mm_resume,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
index 8258a43a6236..5014b5af95fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
@@ -33,13 +33,6 @@ enum UMSCH_SWIP_ENGINE_TYPE {
UMSCH_SWIP_ENGINE_TYPE_MAX
};
-enum UMSCH_SWIP_AFFINITY_TYPE {
- UMSCH_SWIP_AFFINITY_TYPE_ANY = 0,
- UMSCH_SWIP_AFFINITY_TYPE_VCN0 = 1,
- UMSCH_SWIP_AFFINITY_TYPE_VCN1 = 2,
- UMSCH_SWIP_AFFINITY_TYPE_MAX
-};
-
enum UMSCH_CONTEXT_PRIORITY_LEVEL {
CONTEXT_PRIORITY_LEVEL_IDLE = 0,
CONTEXT_PRIORITY_LEVEL_NORMAL = 1,
@@ -51,13 +44,15 @@ enum UMSCH_CONTEXT_PRIORITY_LEVEL {
struct umsch_mm_set_resource_input {
uint32_t vmid_mask_mm_vcn;
uint32_t vmid_mask_mm_vpe;
+ uint32_t collaboration_mask_vpe;
uint32_t logging_vmid;
uint32_t engine_mask;
union {
struct {
uint32_t disable_reset : 1;
uint32_t disable_umsch_mm_log : 1;
- uint32_t reserved : 30;
+ uint32_t use_rs64mem_for_proc_ctx_csa : 1;
+ uint32_t reserved : 29;
};
uint32_t uint32_all;
};
@@ -78,15 +73,18 @@ struct umsch_mm_add_queue_input {
uint32_t doorbell_offset_1;
enum UMSCH_SWIP_ENGINE_TYPE engine_type;
uint32_t affinity;
- enum UMSCH_SWIP_AFFINITY_TYPE affinity_type;
uint64_t mqd_addr;
uint64_t h_context;
uint64_t h_queue;
uint32_t vm_context_cntl;
+ uint32_t process_csa_array_index;
+ uint32_t context_csa_array_index;
+
struct {
uint32_t is_context_suspended : 1;
- uint32_t reserved : 31;
+ uint32_t collaboration_mode : 1;
+ uint32_t reserved : 30;
};
};
@@ -94,6 +92,7 @@ struct umsch_mm_remove_queue_input {
uint32_t doorbell_offset_0;
uint32_t doorbell_offset_1;
uint64_t context_csa_addr;
+ uint32_t context_csa_array_index;
};
struct MQD_INFO {
@@ -103,6 +102,7 @@ struct MQD_INFO {
uint32_t wptr_val;
uint32_t rptr_val;
uint32_t unmapped;
+ uint32_t vmid;
};
struct amdgpu_umsch_mm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 59acf424a078..968ca2c84ef7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -743,7 +743,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
uint32_t created = 0;
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
- uint32_t *size = &tmp;
+ uint32_t dummy = 0xffffffff;
+ uint32_t *size = &dummy;
unsigned int idx;
int i, r = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 9c514a606a2f..677eb141554e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -93,7 +93,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_early_init(struct amdgpu_device *adev)
{
- char ucode_prefix[30];
+ char ucode_prefix[25];
char fw_name[40];
int r, i;
@@ -185,7 +185,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
+ fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
+ log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
+ } else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index a418393d89ec..9f06def236fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -454,6 +454,16 @@ struct amdgpu_vcn_rb_metadata {
uint8_t pad[26];
};
+struct amdgpu_vcn5_fw_shared {
+ uint32_t present_flag_0;
+ uint8_t pad[12];
+ struct amdgpu_fw_shared_unified_queue_struct sq;
+ uint8_t pad1[8];
+ struct amdgpu_fw_shared_fw_logging fw_log;
+ struct amdgpu_fw_shared_rb_setup rb_setup;
+ uint8_t pad2[4];
+};
+
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40
#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 7a4eae36778a..54ab51a4ada7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -32,6 +32,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"
@@ -424,7 +425,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
return -EINVAL;
if (pf2vf_info->size > 1024) {
- DRM_ERROR("invalid pf2vf message size\n");
+ dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size);
return -EINVAL;
}
@@ -435,7 +436,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
adev->virt.fw_reserve.checksum_key, checksum);
if (checksum != checkval) {
- DRM_ERROR("invalid pf2vf message\n");
+ dev_err(adev->dev,
+ "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
+ checksum, checkval);
return -EINVAL;
}
@@ -449,7 +452,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
0, checksum);
if (checksum != checkval) {
- DRM_ERROR("invalid pf2vf message\n");
+ dev_err(adev->dev,
+ "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
+ checksum, checkval);
return -EINVAL;
}
@@ -485,7 +490,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
break;
default:
- DRM_ERROR("invalid pf2vf version\n");
+ dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
return -EINVAL;
}
@@ -571,6 +576,11 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->decode_usage = 0;
vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
+ vf2pf_info->mes_info_addr = (uint64_t)adev->mes.resource_1_gpu_addr;
+
+ if (adev->mes.resource_1) {
+ vf2pf_info->mes_info_size = adev->mes.resource_1->tbo.base.size;
+ }
vf2pf_info->checksum =
amd_sriov_msg_checksum(
vf2pf_info, vf2pf_info->header.size, 0, 0);
@@ -584,8 +594,22 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
int ret;
ret = amdgpu_virt_read_pf2vf_data(adev);
- if (ret)
+ if (ret) {
+ adev->virt.vf2pf_update_retry_cnt++;
+ if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
+ amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) {
+ amdgpu_ras_set_fed(adev, true);
+ if (amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work))
+ return;
+ else
+ dev_err(adev->dev, "Failed to queue work at %s!\n", __func__);
+ }
+
goto out;
+ }
+
+ adev->virt.vf2pf_update_retry_cnt = 0;
amdgpu_virt_write_vf2pf_data(adev);
out:
@@ -606,6 +630,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
+ adev->virt.vf2pf_update_retry_cnt = 0;
if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
@@ -705,12 +730,6 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
- if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
- /* VF MMIO access (except mailbox range) from CPU
- * will be blocked during sriov runtime
- */
- adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
-
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 3f59b7b5523f..642f1fd287d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -52,6 +52,8 @@
/* tonga/fiji use this offset */
#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 5
+
enum amdgpu_sriov_vf_mode {
SRIOV_VF_MODE_BARE_METAL = 0,
SRIOV_VF_MODE_ONE_VF,
@@ -130,6 +132,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
/* VCN RB decouple */
AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
+ /* MES info */
+ AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -257,6 +261,7 @@ struct amdgpu_virt {
/* vf2pf message */
struct delayed_work vf2pf_work;
uint32_t vf2pf_update_interval_ms;
+ int vf2pf_update_retry_cnt;
/* multimedia bandwidth config */
bool is_mm_bw_enabled;
@@ -332,6 +337,8 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
#define amdgpu_sriov_is_vcn_rb_decouple(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
+#define amdgpu_sriov_is_mes_info_enable(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
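
The new vf2pf_update_retry_cnt above implements a consecutive-failure threshold: each failed pf2vf read increments it, a success resets it, and crossing AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT escalates to recovery. Stripped of driver detail, the pattern is just this hedged sketch:

/* Returns true when recovery should be triggered */
static bool heartbeat_tick(int *retry_cnt, bool ok, int limit)
{
	if (ok) {
		*retry_cnt = 0;
		return false;
	}

	return ++(*retry_cnt) >= limit;
}
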
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 8baa2e0935cc..e30eecd02ae1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -658,6 +658,8 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.soft_reset = amdgpu_vkms_soft_reset,
.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
.set_powergating_state = amdgpu_vkms_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4299ce386322..4e2391c83d7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -886,6 +886,44 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
}
/**
+ * amdgpu_vm_tlb_flush - prepare TLB flush
+ *
+ * @params: parameters for update
+ * @fence: input fence to sync TLB flush with
+ * @tlb_cb: the callback structure
+ *
+ * Increments the tlb sequence so that future command submissions execute a VM flush.
+ */
+static void
+amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
+ struct dma_fence **fence,
+ struct amdgpu_vm_tlb_seq_struct *tlb_cb)
+{
+ struct amdgpu_vm *vm = params->vm;
+
+ if (!fence || !*fence)
+ return;
+
+ tlb_cb->vm = vm;
+ if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
+ amdgpu_vm_tlb_seq_cb)) {
+ dma_fence_put(vm->last_tlb_flush);
+ vm->last_tlb_flush = dma_fence_get(*fence);
+ } else {
+ amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
+ }
+
+ /* Prepare a TLB flush fence to be attached to PTs */
+ if (!params->unlocked && vm->is_compute_context) {
+ amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
+
+ /* Makes sure no PD/PT is freed before the flush */
+ dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+}
+
+/**
* amdgpu_vm_update_range - update a range in the vm page table
*
* @adev: amdgpu_device pointer to use for commands
@@ -916,8 +954,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct ttm_resource *res, dma_addr_t *pages_addr,
struct dma_fence **fence)
{
- struct amdgpu_vm_update_params params;
struct amdgpu_vm_tlb_seq_struct *tlb_cb;
+ struct amdgpu_vm_update_params params;
struct amdgpu_res_cursor cursor;
enum amdgpu_sync_mode sync_mode;
int r, idx;
@@ -927,8 +965,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
if (!tlb_cb) {
- r = -ENOMEM;
- goto error_unlock;
+ drm_dev_exit(idx);
+ return -ENOMEM;
}
/* Vega20+XGMI where PTEs get inadvertently cached in L2 texture cache,
@@ -948,7 +986,9 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.immediate = immediate;
params.pages_addr = pages_addr;
params.unlocked = unlocked;
+ params.needs_flush = flush_tlb;
params.allow_override = allow_override;
+ INIT_LIST_HEAD(&params.tlb_flush_waitlist);
/* Implicitly sync to command submissions in the same VM before
* unmapping. Sync to moving fences before mapping.
@@ -1031,24 +1071,18 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
r = vm->update_funcs->commit(&params, fence);
+ if (r)
+ goto error_free;
- if (flush_tlb || params.table_freed) {
- tlb_cb->vm = vm;
- if (fence && *fence &&
- !dma_fence_add_callback(*fence, &tlb_cb->cb,
- amdgpu_vm_tlb_seq_cb)) {
- dma_fence_put(vm->last_tlb_flush);
- vm->last_tlb_flush = dma_fence_get(*fence);
- } else {
- amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
- }
+ if (params.needs_flush) {
+ amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
tlb_cb = NULL;
}
+ amdgpu_vm_pt_free_list(adev, &params);
+
error_free:
kfree(tlb_cb);
-
-error_unlock:
amdgpu_vm_eviction_unlock(vm);
drm_dev_exit(idx);
return r;
@@ -1613,6 +1647,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo,
+ uint64_t saddr,
+ uint64_t offset,
+ uint64_t size)
+{
+ uint64_t tmp, lpfn;
+
+ if (saddr & AMDGPU_GPU_PAGE_MASK
+ || offset & AMDGPU_GPU_PAGE_MASK
+ || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+
+ if (check_add_overflow(saddr, size, &tmp)
+ || check_add_overflow(offset, size, &tmp)
+ || size == 0 /* which also leads to end < begin */)
+ return -EINVAL;
+
+ /* make sure the object fits at this offset */
+ if (bo && offset + size > amdgpu_bo_size(bo))
+ return -EINVAL;
+
+ /* Ensure the last pfn does not exceed max_pfn */
+ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+ if (lpfn >= adev->vm_manager.max_pfn)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -1639,21 +1704,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
+ int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@@ -1706,17 +1764,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1730,7 +1780,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@@ -1817,10 +1867,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
+ int r;
+
+ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+ if (r)
+ return r;
- eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
@@ -2391,6 +2445,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
mutex_init(&vm->eviction_lock);
vm->evicting = false;
+ vm->tlb_fence_context = dma_fence_context_alloc(1);
r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
false, &root, xcp_id);
@@ -2924,6 +2979,14 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
if (vm && status) {
vm->fault_info.addr = addr;
vm->fault_info.status = status;
+ /*
+ * Update the fault information globally for later usage
+ * when vm could be stale or freed.
+ */
+ adev->vm_manager.fault_info.addr = addr;
+ adev->vm_manager.fault_info.vmhub = vmhub;
+ adev->vm_manager.fault_info.status = status;
+
if (AMDGPU_IS_GFXHUB(vmhub)) {
vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
vm->fault_info.vmhub |=
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 047ec1930d12..54d7da396de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -257,15 +257,20 @@ struct amdgpu_vm_update_params {
unsigned int num_dw_left;
/**
- * @table_freed: return true if page table is freed when updating
+ * @needs_flush: true whenever we need to invalidate the TLB
*/
- bool table_freed;
+ bool needs_flush;
/**
* @allow_override: true for memory that is not uncached: allows MTYPE
* to be overridden for NUMA local memory.
*/
bool allow_override;
+
+ /**
+ * @tlb_flush_waitlist: temporary storage for BOs until tlb_flush
+ */
+ struct list_head tlb_flush_waitlist;
};
struct amdgpu_vm_update_funcs {
@@ -342,6 +347,7 @@ struct amdgpu_vm {
atomic64_t tlb_seq;
struct dma_fence *last_tlb_flush;
atomic64_t kfd_last_flushed_seq;
+ uint64_t tlb_fence_context;
/* How many times we had to re-generate the page tables */
uint64_t generation;
@@ -422,6 +428,8 @@ struct amdgpu_vm_manager {
* look up VM of a page fault
*/
struct xarray pasids;
+ /* Global registration of recent page fault information */
+ struct amdgpu_vm_fault_info fault_info;
};
struct amdgpu_bo_va_mapping;
@@ -544,6 +552,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);
+void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
+ struct amdgpu_vm_update_params *params);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
@@ -609,5 +619,8 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
uint64_t addr,
uint32_t status,
unsigned int vmhub);
+void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct dma_fence **fence);
#endif
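
The tlb_seq counter and tlb_fence_context added above implement a lazy-flush scheme: commit paths bump the sequence (see amdgpu_vm_cpu_commit() and amdgpu_vm_sdma_commit() below), and submission code flushes only when the counter has moved since its last flush. A hedged sketch of the consumer side; the helper name and cached-sequence parameter are illustrative:

static bool vm_needs_tlb_flush(struct amdgpu_vm *vm, u64 *last_flushed_seq)
{
	u64 seq = atomic64_read(&vm->tlb_seq);

	if (seq == *last_flushed_seq)
		return false;

	*last_flushed_seq = seq;
	return true;
}
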
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 6e31621452de..3895bd7d176a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -108,7 +108,9 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
struct dma_fence **fence)
{
- /* Flush HDP */
+ if (p->needs_flush)
+ atomic64_inc(&p->vm->tlb_seq);
+
mb();
amdgpu_device_flush_hdp(p->adev, NULL);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 124389a6bf48..7fdd306a48a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -622,40 +622,58 @@ void amdgpu_vm_pt_free_work(struct work_struct *work)
}
/**
- * amdgpu_vm_pt_free_dfs - free PD/PT levels
+ * amdgpu_vm_pt_free_list - free PD/PT levels
*
* @adev: amdgpu device structure
- * @vm: amdgpu vm structure
- * @start: optional cursor where to start freeing PDs/PTs
- * @unlocked: vm resv unlock status
+ * @params: see amdgpu_vm_update_params definition
*
- * Free the page directory or page table level and all sub levels.
+ * Free the page directory objects saved in the flush list
*/
-static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *start,
- bool unlocked)
+void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
+ struct amdgpu_vm_update_params *params)
{
- struct amdgpu_vm_pt_cursor cursor;
- struct amdgpu_vm_bo_base *entry;
+ struct amdgpu_vm_bo_base *entry, *next;
+ struct amdgpu_vm *vm = params->vm;
+ bool unlocked = params->unlocked;
+
+ if (list_empty(&params->tlb_flush_waitlist))
+ return;
if (unlocked) {
spin_lock(&vm->status_lock);
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
- list_move(&entry->vm_status, &vm->pt_freed);
-
- if (start)
- list_move(&start->entry->vm_status, &vm->pt_freed);
+ list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
spin_unlock(&vm->status_lock);
schedule_work(&vm->pt_free_work);
return;
}
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
amdgpu_vm_pt_free(entry);
+}
- if (start)
- amdgpu_vm_pt_free(start->entry);
+/**
+ * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
+ *
+ * @params: parameters for the update
+ * @cursor: first PT entry to start the depth-first search from, must be non-NULL
+ *
+ * This list will be freed after TLB flush.
+ */
+static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ struct amdgpu_vm_pt_cursor seek;
+ struct amdgpu_vm_bo_base *entry;
+
+ spin_lock(&params->vm->status_lock);
+ for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
+ if (entry && entry->bo)
+ list_move(&entry->vm_status, &params->tlb_flush_waitlist);
+ }
+
+ /* enter start node now */
+ list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
+ spin_unlock(&params->vm->status_lock);
}
/**
@@ -667,7 +685,13 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
*/
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
+ struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_bo_base *entry;
+
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
+ if (entry)
+ amdgpu_vm_pt_free(entry);
+ }
}
/**
@@ -972,10 +996,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
while (cursor.pfn < frag_start) {
/* Make sure previous mapping is freed */
if (cursor.entry->bo) {
- params->table_freed = true;
- amdgpu_vm_pt_free_dfs(adev, params->vm,
- &cursor,
- params->unlocked);
+ params->needs_flush = true;
+ amdgpu_vm_pt_add_list(params, &cursor);
}
amdgpu_vm_pt_next(adev, &cursor);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 349416e176a1..66e8a016126b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -126,6 +126,10 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
+
+ if (p->needs_flush)
+ atomic64_inc(&p->vm->tlb_seq);
+
WARN_ON(ib->length_dw > p->num_dw_left);
f = amdgpu_job_submit(p->job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
new file mode 100644
index 000000000000..51cddfa3f1e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/workqueue.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_gmc.h"
+
+struct amdgpu_tlb_fence {
+ struct dma_fence base;
+ struct amdgpu_device *adev;
+ struct dma_fence *dependency;
+ struct work_struct work;
+ spinlock_t lock;
+ uint16_t pasid;
+};
+
+static const char *amdgpu_tlb_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu tlb fence";
+}
+
+static const char *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f)
+{
+ return "amdgpu tlb timeline";
+}
+
+static void amdgpu_tlb_fence_work(struct work_struct *work)
+{
+ struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work);
+ int r;
+
+ if (f->dependency) {
+ dma_fence_wait(f->dependency, false);
+ dma_fence_put(f->dependency);
+ f->dependency = NULL;
+ }
+
+ r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, true, 0);
+ if (r) {
+ dev_err(f->adev->dev, "TLB flush failed for PASID %d.\n",
+ f->pasid);
+ dma_fence_set_error(&f->base, r);
+ }
+
+ dma_fence_signal(&f->base);
+ dma_fence_put(&f->base);
+}
+
+static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = amdgpu_tlb_fence_get_driver_name,
+ .get_timeline_name = amdgpu_tlb_fence_get_timeline_name
+};
+
+void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct dma_fence **fence)
+{
+ struct amdgpu_tlb_fence *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f) {
+ /*
+ * We can't fail since the PDEs and PTEs are already updated, so
+ * just block for the dependency and execute the TLB flush
+ */
+ if (*fence)
+ dma_fence_wait(*fence, false);
+
+ amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, 2, true, 0);
+ *fence = dma_fence_get_stub();
+ return;
+ }
+
+ f->adev = adev;
+ f->dependency = *fence;
+ f->pasid = vm->pasid;
+ INIT_WORK(&f->work, amdgpu_tlb_fence_work);
+ spin_lock_init(&f->lock);
+
+ dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+
+ /* TODO: We probably need a separate wq here */
+ dma_fence_get(&f->base);
+ schedule_work(&f->work);
+
+ *fence = &f->base;
+}
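
The new file above gives the VM a way to turn a pending TLB flush into a dma_fence. A minimal usage sketch, derived from the function's own semantics (the surrounding page-table-update context is assumed, not taken from this patch): the helper consumes the fence passed in as the flush's dependency and replaces it with one that signals only after amdgpu_gmc_flush_gpu_tlb_pasid() has run, or with a stub fence after a synchronous flush on the allocation-failure path.

    /* Hypothetical caller sketch, assuming `pt_fence` is the fence of a
     * just-submitted page-table update for `vm`:
     */
    struct dma_fence *fence = pt_fence;

    amdgpu_vm_tlb_fence_create(adev, vm, &fence); /* takes over pt_fence */

    /* `fence` now signals only once the PASID TLB flush has executed */
    dma_fence_wait(fence, false);
    dma_fence_put(fence);
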
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 7a65a2b128ec..c23d97d34b7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -205,7 +205,7 @@ disable_dpm:
dpm_ctl &= 0xfffffffe; /* Disable DPM */
WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
- return 0;
+ return -EINVAL;
}
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
@@ -396,6 +396,12 @@ static int vpe_hw_init(void *handle)
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
+ /* Power on VPE */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (ret)
+ return ret;
+
ret = vpe_load_microcode(vpe);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 8db880244324..6c30eceec896 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -450,6 +450,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
u64 vis_usage = 0, max_bytes, min_block_size;
struct amdgpu_vram_mgr_resource *vres;
u64 size, remaining_size, lpfn, fpfn;
@@ -468,7 +469,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (tbo->type != ttm_bo_type_kernel)
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
pages_per_block = ~0ul;
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -477,7 +478,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
/* default to 2MB */
pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
- pages_per_block = max_t(uint32_t, pages_per_block,
+ pages_per_block = max_t(u32, pages_per_block,
tbo->page_alignment);
}
@@ -498,9 +499,12 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
+ vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;
+
if (fpfn || lpfn != mgr->mm.size)
/* Allocate blocks in desired range */
vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
@@ -514,21 +518,31 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
else
min_block_size = mgr->default_page_size;
- BUG_ON(min_block_size < mm->chunk_size);
-
/* Limit maximum size to 2GiB due to SG table limitations */
size = min(remaining_size, 2ULL << 30);
if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
- !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
+ !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
min_block_size = (u64)pages_per_block << PAGE_SHIFT;
+ BUG_ON(min_block_size < mm->chunk_size);
+
r = drm_buddy_alloc_blocks(mm, fpfn,
lpfn,
size,
min_block_size,
&vres->blocks,
vres->flags);
+
+ if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul &&
+ !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
+ vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+ pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT),
+ tbo->page_alignment);
+
+ continue;
+ }
+
if (unlikely(r))
goto error_free_blocks;
@@ -571,7 +585,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
return 0;
error_free_blocks:
- drm_buddy_free_list(mm, &vres->blocks);
+ drm_buddy_free_list(mm, &vres->blocks, 0);
mutex_unlock(&mgr->lock);
error_fini:
ttm_resource_fini(man, &vres->base);
@@ -604,7 +618,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
amdgpu_vram_mgr_do_reserve(man);
- drm_buddy_free_list(mm, &vres->blocks);
+ drm_buddy_free_list(mm, &vres->blocks, vres->flags);
mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
@@ -912,7 +926,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
kfree(rsv);
list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
- drm_buddy_free_list(&mgr->mm, &rsv->allocated);
+ drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
kfree(rsv);
}
if (!adev->gmc.is_app_apu)
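
Two behaviors change in the allocator above: contiguity is now keyed off the BO's AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS flag rather than the TTM placement flag, and a failed contiguous request can fall back to a non-contiguous one. A condensed sketch of that retry rule (simplified from the loop above; the real code also resets pages_per_block to the 2 MiB default):

    for (;;) {
            r = drm_buddy_alloc_blocks(mm, fpfn, lpfn, size,
                                       min_block_size,
                                       &vres->blocks, vres->flags);
            if (r == -ENOSPC &&
                (vres->flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) &&
                !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
                    /* best-effort contiguity only: relax and retry */
                    vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
                    continue;
            }
            break;
    }
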
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index 0e04e42cf809..b256cbc2bc27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -53,10 +53,20 @@ static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
return (u64)PAGE_SIZE << drm_buddy_block_order(block);
}
+static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_is_clear(block);
+}
+
static inline struct amdgpu_vram_mgr_resource *
to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
{
return container_of(res, struct amdgpu_vram_mgr_resource, base);
}
+static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
+{
+ to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED;
+}
+
#endif
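
The two helpers above wrap DRM buddy's block-clearing state so code outside the manager never touches DRM_BUDDY_CLEARED directly. A hypothetical release-path sketch (the wipe fence and BO variable names are assumptions, not from this patch):

    /* After a VRAM wipe completes, tag the resource so that
     * drm_buddy_free_list() (note the new vres->flags argument above)
     * returns the blocks to the allocator as pre-zeroed memory.
     */
    if (wipe_fence && !dma_fence_wait(wipe_fence, false))
            amdgpu_vram_mgr_set_cleared(bo->tbo.resource);
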
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 20d51f6c9bb8..dd2ec48cf5c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1035,15 +1035,16 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return 0;
}
-static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
{
struct amdgpu_device *adev = handle->adev;
+ struct aca_bank_info info;
const char *error_str;
- u64 status;
+ u64 status, count;
int ret, ext_error_code;
- ret = aca_bank_info_decode(bank, &report->info);
+ ret = aca_bank_info_decode(bank, &info);
if (ret)
return ret;
@@ -1055,15 +1056,28 @@ static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struc
if (error_str)
dev_info(adev->dev, "%s detected\n", error_str);
- if ((type == ACA_ERROR_TYPE_UE && ext_error_code == 0) ||
- (type == ACA_ERROR_TYPE_CE && ext_error_code == 6))
- report->count[type] = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);
+ count = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);
- return 0;
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ if (ext_error_code != 0 && ext_error_code != 9)
+ count = 0ULL;
+
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count);
+ break;
+ case ACA_SMU_TYPE_CE:
+ count = ext_error_code == 6 ? count : 0ULL;
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, count);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
}
static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = {
- .aca_bank_generate_report = xgmi_v6_4_0_aca_bank_generate_report,
+ .aca_bank_parser = xgmi_v6_4_0_aca_bank_parser,
};
static const struct aca_info xgmi_v6_4_0_aca_info = {
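
The parser above replaces the report-based callback: instead of filling an aca_bank_report, it decodes the bank and logs the count straight into the error cache, gated by the extended error code. The gating rule, distilled into a standalone helper (a sketch for illustration, not part of the patch):

    static u64 xgmi_aca_effective_count(enum aca_smu_type type,
                                        int ext_code, u64 errcnt)
    {
            switch (type) {
            case ACA_SMU_TYPE_UE: /* only ext codes 0 and 9 carry a UE count */
                    return (ext_code == 0 || ext_code == 9) ? errcnt : 0ULL;
            case ACA_SMU_TYPE_CE: /* only ext code 6 counts as CE */
                    return ext_code == 6 ? errcnt : 0ULL;
            default:
                    return 0ULL;
            }
    }
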
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 1592c63b3099..a3bfc16de6d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -44,6 +44,7 @@ struct amdgpu_hive_info {
struct amdgpu_reset_domain *reset_domain;
atomic_t ras_recovery;
+ struct ras_event_manager event_mgr;
};
struct amdgpu_pcs_ras_field {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 51a14f6d93bd..fb2b394bb9c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -94,7 +94,8 @@ union amd_sriov_msg_feature_flags {
uint32_t reg_indirect_acc : 1;
uint32_t av1_support : 1;
uint32_t vcn_rb_decouple : 1;
- uint32_t reserved : 24;
+ uint32_t mes_info_enable : 1;
+ uint32_t reserved : 23;
} flags;
uint32_t all;
};
@@ -157,7 +158,7 @@ struct amd_sriov_msg_pf2vf_info_header {
uint32_t reserved[2];
};
-#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (48)
+#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (49)
struct amd_sriov_msg_pf2vf_info {
/* header contains size and version */
struct amd_sriov_msg_pf2vf_info_header header;
@@ -208,6 +209,8 @@ struct amd_sriov_msg_pf2vf_info {
struct amd_sriov_msg_uuid_info uuid_info;
/* PCIE atomic ops support flag */
uint32_t pcie_atomic_ops_support_flags;
+ /* Portion of GPU memory occupied by the VF. Max value is 65535, but stored as uint32_t to maintain alignment with the reserved size */
+ uint32_t gpu_capacity;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
};
@@ -221,7 +224,7 @@ struct amd_sriov_msg_vf2pf_info_header {
uint32_t reserved[2];
};
-#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (70)
+#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (73)
struct amd_sriov_msg_vf2pf_info {
/* header contains size and version */
struct amd_sriov_msg_vf2pf_info_header header;
@@ -265,7 +268,9 @@ struct amd_sriov_msg_vf2pf_info {
uint32_t version;
} ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
uint64_t dummy_page_addr;
-
+ /* FB allocated for guest MES to record UQ info */
+ uint64_t mes_info_addr;
+ uint32_t mes_info_size;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
};
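
The FILLED_SIZE bumps are pure dword accounting: the reserved[] tail pads each message to 256 dwords, so PF2VF grows by one dword for gpu_capacity (48 to 49) and VF2PF by three for the u64 mes_info_addr plus u32 mes_info_size (70 to 73). A compile-time guard one could add (not in the patch; it assumes FILLED_SIZE counts every dword including the header):

    static_assert(sizeof(struct amd_sriov_msg_pf2vf_info) == 256 * sizeof(uint32_t));
    static_assert(sizeof(struct amd_sriov_msg_vf2pf_info) == 256 * sizeof(uint32_t));
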
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index d6f808acfb17..414ea3f560a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
+static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
+{
+ return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
+}
+
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_idx, struct amdgpu_ring *ring)
{
@@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
case AMDGPU_RING_TYPE_VCN_ENC:
case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN;
- if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ if (aqua_vanjaram_xcp_vcn_shared(adev))
inst_mask = 1 << (inst_idx * 2);
break;
default:
@@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update(
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
- /* VCN is shared by two partitions under CPX MODE */
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
- adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ aqua_vanjaram_xcp_vcn_shared(adev))
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
}
@@ -623,7 +630,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
- u32 mask, inst_mask = adev->sdma.sdma_mask;
+ u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
int ret, i;
/* generally 1 AID supports 4 instances */
@@ -635,7 +642,9 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
- if ((inst_mask & mask) == mask)
+ avail_inst = inst_mask & mask;
+ if (avail_inst == mask || avail_inst == 0x3 ||
+ avail_inst == 0xc)
adev->aid_mask |= (1 << i);
}
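
The relaxed check means an AID no longer needs all four SDMA instances present: either half-pair (0x3 or 0xc) now qualifies. A worked example with a hypothetical sdma_mask of 0xf3, assuming num_inst_per_aid == 4 and mask == 0xf:

    u32 inst_mask = 0xf3, mask = 0xf, aid_mask = 0;
    int i = 0;

    for (; inst_mask; inst_mask >>= 4, ++i) {
            u32 avail = inst_mask & mask;

            if (avail == mask || avail == 0x3 || avail == 0xc)
                    aid_mask |= 1 << i;
    }
    /* aid_mask == 0x3: AID0 qualifies via its lower pair (0x3),
     * AID1 via a full set (0xf).
     */
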
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 72362df352f6..d552e013354c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1243,6 +1243,7 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.ps_size = params_size;
ectx.abort = false;
ectx.last_jump = 0;
+ ectx.last_jump_jiffies = 0;
if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
ectx.ws_size = ws;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a3a643254d7a..cf1d5d462b67 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1375,14 +1375,14 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static bool cik_asic_supports_baco(struct amdgpu_device *adev)
+static int cik_asic_supports_baco(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
return amdgpu_dpm_is_baco_supported(adev);
default:
- return false;
+ return 0;
}
}
@@ -2210,6 +2210,8 @@ static const struct amd_ip_funcs cik_common_ip_funcs = {
.soft_reset = cik_common_soft_reset,
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version cik_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index f24e34dc33d1..576baa9dbb0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -435,6 +435,8 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
.soft_reset = cik_ih_soft_reset,
.set_clockgating_state = cik_ih_set_clockgating_state,
.set_powergating_state = cik_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs cik_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index a3fccc4c1f43..6948ebda0fa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1228,6 +1228,8 @@ static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.soft_reset = cik_sdma_soft_reset,
.set_clockgating_state = cik_sdma_set_clockgating_state,
.set_powergating_state = cik_sdma_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
@@ -1290,7 +1292,7 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: is this a secure operation
+ * @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (CIK).
* Used by the amdgpu ttm implementation to move pages if
@@ -1300,7 +1302,7 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
ib->ptr[ib->length_dw++] = byte_count;
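
Across the SDMA generations this series turns the `bool tmz` parameter into a `uint32_t copy_flags` bitfield; CIK simply ignores it, as the updated kerneldoc says. A hypothetical caller sketch (the flag name is assumed from the rest of the series):

    uint32_t copy_flags = 0;

    if (tmz)
            copy_flags |= AMDGPU_COPY_FLAGS_TMZ; /* assumed flag name */

    amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
                            byte_count, copy_flags);
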
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index c19681492efa..072643787384 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -433,6 +433,8 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
.soft_reset = cz_ih_soft_reset,
.set_clockgating_state = cz_ih_set_clockgating_state,
.set_powergating_state = cz_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs cz_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 221af054d874..b44fce44c066 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3333,6 +3333,8 @@ static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.soft_reset = dce_v10_0_soft_reset,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 69e8b0db6cf7..80b2e7f79acf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3464,6 +3464,8 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.soft_reset = dce_v11_0_soft_reset,
.set_clockgating_state = dce_v11_0_set_clockgating_state,
.set_powergating_state = dce_v11_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 60d40201fdd1..db20012600f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -3154,6 +3154,8 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.soft_reset = dce_v6_0_soft_reset,
.set_clockgating_state = dce_v6_0_set_clockgating_state,
.set_powergating_state = dce_v6_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5a5fcc45e452..5b56100ec902 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3242,6 +3242,8 @@ static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.soft_reset = dce_v8_0_soft_reset,
.set_clockgating_state = dce_v8_0_set_clockgating_state,
.set_powergating_state = dce_v8_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f90905ef32c7..536287ddd2ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -276,6 +276,99 @@ MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec2.bin");
MODULE_FIRMWARE("amdgpu/gc_10_3_7_rlc.bin");
+static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS3),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_ERROR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_HPD_STATUS0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_CMD_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_CMD_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_CMD_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_CMD_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BUFSZ),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCPF_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCPC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCPG_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGDS_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGDS_VM_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS_2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmPA_CL_CNTL_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRMI_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSQC_DCACHE_UTCL0_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSQC_ICACHE_UTCL0_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSQG_UTCL0_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmTCP_UTCL0_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmWD_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_DEBUG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC1_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC2_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_DEBUG_INTERRUPT_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_COMMAND),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_MESSAGE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_3),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_4),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSMU_RLC_RESPONSE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_RLCS_GPM_STAT_2),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SPP_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_A),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_B),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmRLC_LX6_CORE_PDEBUG_INST)
+};
+
static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
@@ -3964,7 +4057,7 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
- char fw_name[40];
+ char fw_name[53];
char ucode_prefix[30];
const char *wks = "";
int err;
@@ -4490,6 +4583,22 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio, NULL);
}
+static void gfx_v10_0_alloc_dump_mem(struct amdgpu_device *adev)
+{
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
+ uint32_t *ptr;
+
+ ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr == NULL) {
+ DRM_ERROR("Failed to allocate memory for IP Dump\n");
+ adev->gfx.ip_dump = NULL;
+ adev->gfx.reg_count = 0;
+ } else {
+ adev->gfx.ip_dump = ptr;
+ adev->gfx.reg_count = reg_count;
+ }
+}
+
static int gfx_v10_0_sw_init(void *handle)
{
int i, j, k, r, ring_id = 0;
@@ -4518,7 +4627,7 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 1;
+ adev->gfx.me.num_pipe_per_me = 2;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
@@ -4642,6 +4751,8 @@ static int gfx_v10_0_sw_init(void *handle)
gfx_v10_0_gpu_early_init(adev);
+ gfx_v10_0_alloc_dump_mem(adev);
+
return 0;
}
@@ -4694,6 +4805,8 @@ static int gfx_v10_0_sw_fini(void *handle)
gfx_v10_0_free_microcode(adev);
+ kfree(adev->gfx.ip_dump);
+
return 0;
}
@@ -8317,7 +8430,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
reg_mem_engine = 0;
} else {
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
+ ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
reg_mem_engine = 1; /* pfp */
}
@@ -9154,6 +9267,36 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
+
+ if (!adev->gfx.ip_dump)
+ return;
+
+ for (i = 0; i < reg_count; i++)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_reg_list_10_1[i].reg_name,
+ adev->gfx.ip_dump[i]);
+}
+
+static void gfx_v10_ip_dump(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
+
+ if (!adev->gfx.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < reg_count; i++)
+ adev->gfx.ip_dump[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_10_1[i]));
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.name = "gfx_v10_0",
.early_init = gfx_v10_0_early_init,
@@ -9170,6 +9313,8 @@ static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.set_clockgating_state = gfx_v10_0_set_clockgating_state,
.set_powergating_state = gfx_v10_0_set_powergating_state,
.get_clockgating_state = gfx_v10_0_get_clockgating_state,
+ .dump_ip_state = gfx_v10_ip_dump,
+ .print_ip_state = gfx_v10_ip_print,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
@@ -9186,7 +9331,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -9276,7 +9421,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
7 + /* gfx_v10_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
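
The dump/print pair is split so registers can be latched once (with GFXOFF forced off) and printed later from a safe context. A minimal sketch of driving the hooks by hand, using the stock drm_info_printer; the in-tree caller is assumed to be the IP-dump path during GPU reset:

    struct drm_printer p = drm_info_printer(adev->dev);

    gfx_v10_ip_dump(adev);      /* latch gc_reg_list_10_1 values  */
    gfx_v10_ip_print(adev, &p); /* pretty-print the latched dump  */
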
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 1770e496c1b7..ad6431013c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -510,7 +510,7 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
- char ucode_prefix[30];
+ char ucode_prefix[25];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
@@ -4506,14 +4506,11 @@ static int gfx_v11_0_soft_reset(void *handle)
gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
- WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
+ soc21_grbm_select(adev, i, k, j, 0);
WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
@@ -4523,16 +4520,14 @@ static int gfx_v11_0_soft_reset(void *handle)
for (i = 0; i < adev->gfx.me.num_me; ++i) {
for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
- tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
- WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
+ soc21_grbm_select(adev, i, k, j, 0);
WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
}
}
}
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
@@ -5465,6 +5460,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* Make sure that we can't skip the SET_Q_MODE packets when the VM
* changed in any way.
*/
+ ring->set_q_mode_offs = 0;
ring->set_q_mode_ptr = NULL;
}
@@ -6173,6 +6169,8 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.set_clockgating_state = gfx_v11_0_set_clockgating_state,
.set_powergating_state = gfx_v11_0_set_powergating_state,
.get_clockgating_state = gfx_v11_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
@@ -6191,7 +6189,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
5 + /* COND_EXEC */
@@ -6277,7 +6275,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
7 + /* gfx_v11_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v11_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
.emit_ib = gfx_v11_0_ring_emit_ib_compute,
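
The soft-reset rework above swaps the open-coded GRBM_GFX_CNTL read-modify-write for the shared soc21_grbm_select() helper and, importantly, adds the srbm_mutex plus a restore-to-broadcast step the old code was missing. The invariant, boiled down:

    mutex_lock(&adev->srbm_mutex);
    soc21_grbm_select(adev, me, pipe, queue, 0); /* focus one queue   */
    /* ... per-queue CP_*_HQD / SPI register accesses ... */
    soc21_grbm_select(adev, 0, 0, 0, 0);         /* back to broadcast */
    mutex_unlock(&adev->srbm_mutex);
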
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 34f9211b2679..d0992ce9fb47 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3457,6 +3457,8 @@ static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.soft_reset = gfx_v6_0_soft_reset,
.set_clockgating_state = gfx_v6_0_set_clockgating_state,
.set_powergating_state = gfx_v6_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 86a4865b1ae5..541dbd70d8c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4977,6 +4977,8 @@ static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.soft_reset = gfx_v7_0_soft_reset,
.set_clockgating_state = gfx_v7_0_set_clockgating_state,
.set_powergating_state = gfx_v7_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 202ddda57f98..2f0e72caee1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6878,6 +6878,8 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
.get_clockgating_state = gfx_v8_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6f97a6d0e6d0..3c8c5abf35ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1249,7 +1249,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
char *chip_name)
{
- char fw_name[30];
+ char fw_name[50];
int err;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
@@ -1282,7 +1282,7 @@ out:
static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
char *chip_name)
{
- char fw_name[30];
+ char fw_name[53];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -1337,7 +1337,7 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
char *chip_name)
{
- char fw_name[30];
+ char fw_name[50];
int err;
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
@@ -6856,6 +6856,8 @@ static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.set_clockgating_state = gfx_v9_0_set_clockgating_state,
.set_powergating_state = gfx_v9_0_set_powergating_state,
.get_clockgating_state = gfx_v9_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
@@ -6981,7 +6983,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
@@ -7019,7 +7020,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 065b2bd5f5a6..3f4fd2f08163 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -1909,18 +1909,7 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
mutex_unlock(&adev->grbm_idx_mutex);
}
-static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev)
-{
- u32 status = 0;
- struct amdgpu_vmhub *hub;
-
- hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
- status = RREG32(hub->vm_l2_pro_fault_status);
- /* reset page fault status */
- WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
- return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
-}
struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
.query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
@@ -1934,5 +1923,4 @@ struct amdgpu_gfx_ras gfx_v9_4_2_ras = {
.hw_ops = &gfx_v9_4_2_ras_ops,
},
.enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
- .query_utcl2_poison_status = gfx_v9_4_2_query_uctl2_poison_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index b53c8fd4e8cf..7b16e8cca86a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -431,16 +431,16 @@ out:
static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
+ char ucode_prefix[15];
int r;
- chip_name = "gc_9_4_3";
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
+ r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
if (r)
return r;
- r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
+ r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
if (r)
return r;
@@ -680,38 +680,44 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
};
-static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle,
- struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
+ struct aca_bank *bank, enum aca_smu_type type,
+ void *data)
{
- u64 status, misc0;
+ struct aca_bank_info info;
+ u64 misc0;
u32 instlo;
int ret;
- status = bank->regs[ACA_REG_IDX_STATUS];
- if ((type == ACA_ERROR_TYPE_UE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
- (type == ACA_ERROR_TYPE_CE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
- ret = aca_bank_info_decode(bank, &report->info);
- if (ret)
- return ret;
+ /* NOTE: overwrite info.die_id with xcd id for gfx */
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+ info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
- /* NOTE: overwrite info.die_id with xcd id for gfx */
- instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
- instlo &= GENMASK(31, 1);
- report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
- misc0 = bank->regs[ACA_REG_IDX_MISC0];
- report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info,
+ ACA_ERROR_TYPE_UE, 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info,
+ ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
}
- return 0;
+ return ret;
}
static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
u32 instlo;
@@ -730,7 +736,7 @@ static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_b
}
static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
- .aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report,
+ .aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};
@@ -2398,10 +2404,10 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
if (def != data)
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
- /* enable cgcg FSM(0x0000363F) */
+ /* CGCG Hysteresis: 400us */
def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
- data = (0x36
+ data = (0x2710
<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
@@ -2410,10 +2416,10 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
if (def != data)
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
- /* set IDLE_POLL_COUNT(0x00900100) */
+ /* set IDLE_POLL_COUNT (0x33450100) */
def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
- (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
+ (0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
if (def != data)
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
} else {
@@ -4010,6 +4016,8 @@ static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
.set_powergating_state = gfx_v9_4_3_set_powergating_state,
.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
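
The new hysteresis value is a worked quantity; assuming the idle counter ticks at a 25 MHz reference clock, the numbers line up with the new comment:

    /* 0x2710 = 10000 counts ; 10000 / 25 MHz (assumed) = 400 us
     * 0x0036 =    54 counts ;    54 / 25 MHz           ~ 2.2 us
     */

IDLE_POLL_COUNT grows in the same spirit, from 0x0090 to 0x3345 polls at an unchanged 0x0100 poll frequency.
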
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 22175da0e16a..d200310d1731 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -443,6 +443,22 @@ static void gfxhub_v1_0_init(struct amdgpu_device *adev)
mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
+static bool gfxhub_v1_0_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ u32 status = 0;
+ struct amdgpu_vmhub *hub;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
+ return false;
+
+ hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ status = RREG32(hub->vm_l2_pro_fault_status);
+ /* reset page fault status */
+ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
+
+ return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+}
const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
@@ -452,4 +468,5 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
.init = gfxhub_v1_0_init,
.get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
+ .query_utcl2_poison_status = gfxhub_v1_0_query_utcl2_poison_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 49aecdcee006..77df8c9cbad2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -620,6 +620,20 @@ static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
return 0;
}
+static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ u32 fed, status;
+
+ status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS);
+ fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+ /* reset page fault status */
+ WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
+ regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
+
+ return fed;
+}
+
const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
@@ -628,6 +642,7 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
.init = gfxhub_v1_2_init,
.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
+ .query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status,
};
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
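
With the query moved out of gfx_v9_4_2 and into the per-hub funcs (note the new xcc_id parameter for multi-XCC parts), a RAS consumer can probe poison status generically. A hypothetical consumer sketch; the actual handling is elided:

    if (adev->gfxhub.funcs->query_utcl2_poison_status &&
        adev->gfxhub.funcs->query_utcl2_poison_status(adev, xcc_id)) {
            /* FED was set: treat the fault as consumed poison rather
             * than a normal page fault (handling elided here).
             */
    }
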
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 23b478639921..3e38d8bfcb69 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1115,6 +1115,8 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.soft_reset = gmc_v6_0_soft_reset,
.set_clockgating_state = gmc_v6_0_set_clockgating_state,
.set_powergating_state = gmc_v6_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 3da7b6a2b00d..85df8fc81065 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1354,6 +1354,8 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.soft_reset = gmc_v7_0_soft_reset,
.set_clockgating_state = gmc_v7_0_set_clockgating_state,
.set_powergating_state = gmc_v7_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d20e5f20ee31..fc97757e33d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1717,6 +1717,8 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
.get_clockgating_state = gmc_v8_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 47b63a4ce68b..c4ec1358f3aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -548,7 +548,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
{
bool retry_fault = !!(entry->src_data[1] & 0x80);
bool write_fault = !!(entry->src_data[1] & 0x20);
- uint32_t status = 0, cid = 0, rw = 0;
+ uint32_t status = 0, cid = 0, rw = 0, fed = 0;
struct amdgpu_task_info *task_info;
struct amdgpu_vmhub *hub;
const char *mmhub_cid;
@@ -664,6 +664,13 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
status = RREG32(hub->vm_l2_pro_fault_status);
cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
+ fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+
+ /* for fed error, kfd will handle it, return directly */
+ if (fed && amdgpu_ras_is_poison_mode_supported(adev) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
+ return 0;
+
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);
@@ -1450,7 +1457,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
adev->umc.active_mask = adev->aid_mask;
adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
- adev->umc.channel_idx_tbl = &umc_v12_0_channel_idx_tbl[0][0][0];
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
break;
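
The early return added above keeps the IH path from clearing the fault status or logging when the fault is really consumed poison; per the in-code comment, KFD owns that case. The predicate, condensed into one expression:

    bool leave_to_kfd = fed &&
                        amdgpu_ras_is_poison_mode_supported(adev) &&
                        amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2);
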
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 2c02ae69883d..07984f7c3ae7 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -425,6 +425,8 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.soft_reset = iceland_ih_soft_reset,
.set_clockgating_state = iceland_ih_set_clockgating_state,
.set_powergating_state = iceland_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index ad4ad39f128f..3cb64c8f7175 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -346,6 +346,21 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev)
DELAY, 3);
WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
+ /* Redirect the interrupts to IH RB1 for dGPU */
+ if (adev->irq.ih1.ring_size) {
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
+ WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
+ SOURCE_ID_MATCH_ENABLE, 0x1);
+
+ WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
+ }
+
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -549,8 +564,15 @@ static int ih_v6_0_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- adev->irq.ih1.ring_size = 0;
- adev->irq.ih2.ring_size = 0;
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
+ use_bus_addr);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+ }
/* initialize ih control register offset */
ih_v6_0_init_register_offset(adev);
@@ -748,6 +770,8 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
.set_clockgating_state = ih_v6_0_set_clockgating_state,
.set_powergating_state = ih_v6_0_set_powergating_state,
.get_clockgating_state = ih_v6_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
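
On dGPUs the driver now brings up IH ring 1 and programs filter slot 0 so one (client, source) pair, client 0xa with source 0x0 and SOURCE_ID_MATCH_ENABLE set, is steered off the main ring; APUs keep ih1 disabled, which also skips the redirect block. The doorbell assignment is the usual dword-index-to-64-bit-slot conversion, with RB1 taking the slot after RB0 (inferred from the +1 above):

    /* 64-bit doorbell slots, dword-based indices: */
    adev->irq.ih.doorbell_index  = adev->doorbell_index.ih << 1;
    adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
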
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index b8da0fc29378..0fbf5fa7b0f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -346,6 +346,21 @@ static int ih_v6_1_irq_init(struct amdgpu_device *adev)
DELAY, 3);
WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
+ /* Redirect the interrupts to IH RB1 for dGPU */
+ if (adev->irq.ih1.ring_size) {
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
+ WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
+ tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
+ SOURCE_ID_MATCH_ENABLE, 0x1);
+
+ WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
+ }
+
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -550,8 +565,15 @@ static int ih_v6_1_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- adev->irq.ih1.ring_size = 0;
- adev->irq.ih2.ring_size = 0;
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
+ use_bus_addr);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+ }
/* initialize ih control register offset */
ih_v6_1_init_register_offset(adev);
@@ -753,6 +775,8 @@ static const struct amd_ip_funcs ih_v6_1_ip_funcs = {
.set_clockgating_state = ih_v6_1_set_clockgating_state,
.set_powergating_state = ih_v6_1_set_powergating_state,
.get_clockgating_state = ih_v6_1_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 7aed96fa10a9..aa6235dd4f2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -749,6 +749,8 @@ static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
.set_clockgating_state = ih_v7_0_set_clockgating_state,
.set_powergating_state = ih_v7_0_set_powergating_state,
.get_clockgating_state = ih_v7_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v7_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 1c8116d75f63..ef3e42f6b841 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -759,6 +759,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_0_set_clockgating_state,
.set_powergating_state = jpeg_v2_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 99cd49ee8ef6..afeaf3c64e27 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -632,6 +632,8 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
@@ -652,6 +654,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index a92481da60cd..1c7cf4800bf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -557,6 +557,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
.set_powergating_state = jpeg_v3_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 88ea58d5c4ab..237fe5df5a8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -719,6 +719,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 32caeb37cef9..d66af11aa66c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -1053,6 +1053,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_3_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index edf5bcdd2bc9..da6bb9022b80 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -762,6 +762,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index e70200f97555..64c856bfe0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -513,6 +513,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
.set_powergating_state = jpeg_v5_0_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 1e5ad1e08d2a..a626bf904926 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -1176,6 +1176,8 @@ static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
.hw_fini = mes_v10_1_hw_fini,
.suspend = mes_v10_1_suspend,
.resume = mes_v10_1_resume,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version mes_v10_1_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 072c478665ad..0d1407f25005 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -100,18 +100,76 @@ static const struct amdgpu_ring_funcs mes_v11_0_ring_funcs = {
.insert_nop = amdgpu_ring_insert_nop,
};
+static const char *mes_v11_0_opcodes[] = {
+ "SET_HW_RSRC",
+ "SET_SCHEDULING_CONFIG",
+ "ADD_QUEUE",
+ "REMOVE_QUEUE",
+ "PERFORM_YIELD",
+ "SET_GANG_PRIORITY_LEVEL",
+ "SUSPEND",
+ "RESUME",
+ "RESET",
+ "SET_LOG_BUFFER",
+ "CHANGE_GANG_PRORITY",
+ "QUERY_SCHEDULER_STATUS",
+ "PROGRAM_GDS",
+ "SET_DEBUG_VMID",
+ "MISC",
+ "UPDATE_ROOT_PAGE_TABLE",
+ "AMD_LOG",
+};
+
+static const char *mes_v11_0_misc_opcodes[] = {
+ "WRITE_REG",
+ "INV_GART",
+ "QUERY_STATUS",
+ "READ_REG",
+ "WAIT_REG_MEM",
+ "SET_SHADER_DEBUGGER",
+};
+
+static const char *mes_v11_0_get_op_string(union MESAPI__MISC *x_pkt)
+{
+ const char *op_str = NULL;
+
+ if (x_pkt->header.opcode < ARRAY_SIZE(mes_v11_0_opcodes))
+ op_str = mes_v11_0_opcodes[x_pkt->header.opcode];
+
+ return op_str;
+}
+
+static const char *mes_v11_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
+{
+ const char *op_str = NULL;
+
+ if ((x_pkt->header.opcode == MES_SCH_API_MISC) &&
+ (x_pkt->opcode < ARRAY_SIZE(mes_v11_0_misc_opcodes)))
+ op_str = mes_v11_0_misc_opcodes[x_pkt->opcode];
+
+ return op_str;
+}
+
static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
void *pkt, int size,
int api_status_off)
{
int ndw = size / 4;
signed long r;
- union MESAPI__ADD_QUEUE *x_pkt = pkt;
+ union MESAPI__MISC *x_pkt = pkt;
struct MES_API_STATUS *api_status;
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
unsigned long flags;
- signed long timeout = adev->usec_timeout;
+ signed long timeout = 3000000; /* 3000 ms */
+ const char *op_str, *misc_op_str;
+ u32 fence_offset;
+ u64 fence_gpu_addr;
+ u64 *fence_ptr;
+ int ret;
+
+ if (x_pkt->header.opcode >= MES_SCH_API_MAX)
+ return -EINVAL;
if (amdgpu_emu_mode) {
timeout *= 100;
@@ -121,27 +179,52 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
}
BUG_ON(size % 4 != 0);
+ ret = amdgpu_device_wb_get(adev, &fence_offset);
+ if (ret)
+ return ret;
+ fence_gpu_addr =
+ adev->wb.gpu_addr + (fence_offset * 4);
+ fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
+ *fence_ptr = 0;
+
spin_lock_irqsave(&mes->ring_lock, flags);
if (amdgpu_ring_alloc(ring, ndw)) {
spin_unlock_irqrestore(&mes->ring_lock, flags);
+ amdgpu_device_wb_free(adev, fence_offset);
return -ENOMEM;
}
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
- api_status->api_completion_fence_addr = mes->ring.fence_drv.gpu_addr;
- api_status->api_completion_fence_value = ++mes->ring.fence_drv.sync_seq;
+ api_status->api_completion_fence_addr = fence_gpu_addr;
+ api_status->api_completion_fence_value = 1;
amdgpu_ring_write_multiple(ring, pkt, ndw);
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
- DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);
+ op_str = mes_v11_0_get_op_string(x_pkt);
+ misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
+
+ if (misc_op_str)
+ dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str);
+ else if (op_str)
+ dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
+ else
+ dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode);
- r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq,
- timeout);
+ r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout);
+ amdgpu_device_wb_free(adev, fence_offset);
if (r < 1) {
- DRM_ERROR("MES failed to response msg=%d\n",
- x_pkt->header.opcode);
+
+ if (misc_op_str)
+ dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
+ op_str, misc_op_str);
+ else if (op_str)
+ dev_err(adev->dev, "MES failed to respond to msg=%s\n",
+ op_str);
+ else
+ dev_err(adev->dev, "MES failed to respond to msg=%d\n",
+ x_pkt->header.opcode);
while (halt_if_hws_hang)
schedule();
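The completion handshake above replaces the ring's fence sequence with a dedicated writeback slot: the packet carries the slot's GPU address, the firmware writes 1 there on completion, and the driver polls the CPU-side mapping. A minimal sketch of the polling side, under the assumption that amdgpu_mes_fence_wait_polling() busy-waits on the slot (illustration only, not the driver's actual implementation):

	/* Poll a 64-bit fence in writeback memory until it reaches seq or
	 * the timeout (in usec) runs out; returning the remaining time is
	 * what lets the caller's "r < 1" test catch a timeout.
	 */
	static signed long fence_wait_polling_sketch(volatile u64 *fence,
						     u64 seq,
						     signed long timeout)
	{
		while (*fence < seq && timeout > 0) {
			udelay(2);
			timeout -= 2;
		}
		return timeout;
	}
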
@@ -411,14 +494,47 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
- mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ if (amdgpu_mes_log_enable) {
+ mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
+ mes->event_log_gpu_addr;
+ }
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}
+static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
+{
+ int size = 128 * PAGE_SIZE;
+ int ret = 0;
+ struct amdgpu_device *adev = mes->adev;
+ union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt;
+ memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
+
+ mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
+ mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+ mes_set_hw_res_pkt.enable_mes_info_ctx = 1;
+
+ ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &mes->resource_1,
+ &mes->resource_1_gpu_addr,
+ &mes->resource_1_addr);
+ if (ret) {
+ dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", ret);
+ return ret;
+ }
+
+ mes_set_hw_res_pkt.mes_info_ctx_mc_addr = mes->resource_1_gpu_addr;
+ mes_set_hw_res_pkt.mes_info_ctx_size = mes->resource_1->tbo.base.size;
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
+ offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -1200,6 +1316,14 @@ static int mes_v11_0_hw_init(void *handle)
if (r)
goto failure;
+ if (amdgpu_sriov_is_mes_info_enable(adev)) {
+ r = mes_v11_0_set_hw_resources_1(&adev->mes);
+ if (r) {
+			DRM_ERROR("mes_v11_0_set_hw_resources_1 failed, r=%d\n", r);
+ goto failure;
+ }
+ }
+
r = mes_v11_0_query_sched_status(&adev->mes);
if (r) {
DRM_ERROR("MES is busy\n");
@@ -1223,6 +1347,11 @@ failure:
static int mes_v11_0_hw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_is_mes_info_enable(adev)) {
+ amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
+ &adev->mes.resource_1_addr);
+ }
return 0;
}
@@ -1288,6 +1417,8 @@ static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.hw_fini = mes_v11_0_hw_fini,
.suspend = mes_v11_0_suspend,
.resume = mes_v11_0_resume,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version mes_v11_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index c0fc44cdd658..7a1ff298417a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -559,6 +559,20 @@ static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
}
+static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev,
+ int hub_inst)
+{
+ u32 fed, status;
+
+ status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS);
+ fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+ /* reset page fault status */
+ WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
+ regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
+
+ return fed;
+}
+
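WREG32_P() is the driver's masked read-modify-write helper: bits covered by the mask argument are preserved from the current register value and the remainder comes from the value argument, i.e. new = (old & mask) | (val & ~mask). A small expansion of the call above, assuming that usual definition:

	/* WREG32_P(reg, 1, ~1) keeps bits 31:1 and forces bit 0 to 1,
	 * which is the write-to-clear for the fault status here.
	 */
	u32 old = RREG32(reg);
	WREG32(reg, (old & ~1u) | 1u);
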
const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.get_fb_location = mmhub_v1_8_get_fb_location,
.init = mmhub_v1_8_init,
@@ -568,6 +582,7 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
.set_clockgating = mmhub_v1_8_set_clockgating,
.get_clockgating = mmhub_v1_8_get_clockgating,
+ .query_utcl2_poison_status = mmhub_v1_8_query_utcl2_poison_status,
};
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
@@ -706,28 +721,32 @@ static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
.reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
};
-static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle,
- struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
{
- u64 status, misc0;
+ struct aca_bank_info info;
+ u64 misc0;
int ret;
- status = bank->regs[ACA_REG_IDX_STATUS];
- if ((type == ACA_ERROR_TYPE_UE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
- (type == ACA_ERROR_TYPE_CE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
-
- ret = aca_bank_info_decode(bank, &report->info);
- if (ret)
- return ret;
-
- misc0 = bank->regs[ACA_REG_IDX_MISC0];
- report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
}
- return 0;
+ return ret;
}
/* error codes: refer to the smu driver interface header file */
@@ -741,7 +760,7 @@ static int mmhub_v1_8_err_codes[] = {
};
static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
u32 instlo;
@@ -760,7 +779,7 @@ static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_b
}
static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = {
- .aca_bank_generate_report = mmhub_v1_8_aca_bank_generate_report,
+ .aca_bank_parser = mmhub_v1_8_aca_bank_parser,
.aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index a2bd2c3b1ef9..0c7275bca8f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -276,6 +276,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
timeout -= 10;
} while (timeout > 1);
+	dev_warn(adev->dev, "timed out waiting for IDH_FLR_NOTIFICATION_CMPL\n");
+
flr_done:
atomic_set(&adev->reset_domain->in_gpu_reset, 0);
up_write(&adev->reset_domain->sem);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 77f5b55decf9..aba00d961627 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -309,6 +309,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
timeout -= 10;
} while (timeout > 1);
+	dev_warn(adev->dev, "timed out waiting for IDH_FLR_NOTIFICATION_CMPL\n");
+
flr_done:
atomic_set(&adev->reset_domain->in_gpu_reset, 0);
up_write(&adev->reset_domain->sem);
@@ -444,7 +446,6 @@ static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
amdgpu_virt_fini_data_exchange(adev);
xgpu_nv_send_access_requests_with_param(adev,
IDH_RAS_POISON, block, 0, 0);
- amdgpu_virt_init_data_exchange(adev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 4178f4e5dad7..b281462093f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -713,6 +713,8 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
.set_clockgating_state = navi10_ih_set_clockgating_state,
.set_powergating_state = navi10_ih_set_powergating_state,
.get_clockgating_state = navi10_ih_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 4d7976b77767..12e54047bf79 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -110,7 +110,7 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
@@ -121,7 +121,7 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[]
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -199,7 +199,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
@@ -1131,4 +1131,6 @@ static const struct amd_ip_funcs nv_common_ip_funcs = {
.set_clockgating_state = nv_common_set_clockgating_state,
.set_powergating_state = nv_common_set_powergating_state,
.get_clockgating_state = nv_common_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 78a95f8f370b..f08a32c18694 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -169,7 +169,8 @@ static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp)
static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp)
{
- return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
+ /* dbg_drv was renamed to had_drv in psp v14 */
+ return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_HADDRV);
}
static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp)
@@ -177,6 +178,10 @@ static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp)
return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
}
+static int psp_v14_0_bootloader_load_ipkeymgr_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->ipkeymgr_drv, PSP_BL__LOAD_IPKEYMGRDRV);
+}
static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
{
@@ -653,6 +658,7 @@ static const struct psp_funcs psp_v14_0_funcs = {
.bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv,
.bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv,
+ .bootloader_load_ipkeymgr_drv = psp_v14_0_bootloader_load_ipkeymgr_drv,
.bootloader_load_sos = psp_v14_0_bootloader_load_sos,
.ring_create = psp_v14_0_ring_create,
.ring_stop = psp_v14_0_ring_stop,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 07e19caf2bc1..ac8a9b9b3e52 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -1113,6 +1113,8 @@ static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.soft_reset = sdma_v2_4_soft_reset,
.set_clockgating_state = sdma_v2_4_set_clockgating_state,
.set_powergating_state = sdma_v2_4_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
@@ -1176,7 +1178,7 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: unused
+ * @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (VI).
* Used by the amdgpu ttm implementation to move pages if
@@ -1186,7 +1188,7 @@ static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2ad615be4bb3..b8ebdc4ae6f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1553,6 +1553,8 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
.get_clockgating_state = sdma_v3_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
@@ -1616,7 +1618,7 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: unused
+ * @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (VI).
* Used by the amdgpu ttm implementation to move pages if
@@ -1626,7 +1628,7 @@ static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 43775cb67ff5..101038395c3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2021,6 +2021,9 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: SDMA trap\n");
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ return instance;
+
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
@@ -2448,7 +2451,7 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine (VEGA10/12).
* Used by the amdgpu ttm implementation to move pages if
@@ -2458,11 +2461,11 @@ static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
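Across the SDMA generations in this series the emit_copy_buffer() callback's bool tmz becomes a u32 copy_flags, leaving room for more per-copy attributes in a single argument. A hedged sketch of the calling pattern via the amdgpu_emit_copy_buffer() wrapper (variable names are illustrative):

	/* Build a flags word instead of passing a bare bool; secure
	 * (TMZ) copies set the corresponding bit, which the packet
	 * header above then tests with a mask.
	 */
	uint32_t copy_flags = 0;

	if (secure_copy)	/* hypothetical per-buffer condition */
		copy_flags |= AMDGPU_COPY_FLAGS_TMZ;

	amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
				byte_count, copy_flags);
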
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 34237a1b1f2e..341b24d8320b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -368,7 +368,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
+ << (ring->me % adev->sdma.num_inst_per_aid);
sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
@@ -1602,19 +1603,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
- DRAM_ECC_INT_ENABLE, 0);
- WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
- break;
- /* sdma ecc interrupt is enabled by default
- * driver doesn't need to do anything to
- * enable the interrupt */
- case AMDGPU_IRQ_STATE_ENABLE:
- default:
- break;
- }
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
return 0;
}
@@ -1954,7 +1945,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine.
* Used by the amdgpu ttm implementation to move pages if
@@ -1964,11 +1955,11 @@ static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
@@ -2189,35 +2180,39 @@ static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = {
.reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count,
};
-static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle,
- struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
{
- u64 status, misc0;
+ struct aca_bank_info info;
+ u64 misc0;
int ret;
- status = bank->regs[ACA_REG_IDX_STATUS];
- if ((type == ACA_ERROR_TYPE_UE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
- (type == ACA_ERROR_TYPE_CE &&
- ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
-
- ret = aca_bank_info_decode(bank, &report->info);
- if (ret)
- return ret;
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
- misc0 = bank->regs[ACA_REG_IDX_MISC0];
- report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
}
- return 0;
+ return ret;
}
/* CODE_SDMA0 - CODE_SDMA4, refer to the smu driver interface header file */
static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 };
static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
- enum aca_error_type type, void *data)
+ enum aca_smu_type type, void *data)
{
u32 instlo;
@@ -2236,7 +2231,7 @@ static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_
}
static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = {
- .aca_bank_generate_report = sdma_v4_4_2_aca_bank_generate_report,
+ .aca_bank_parser = sdma_v4_4_2_aca_bank_parser,
.aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 883e8a1b8a40..b7d33d78bce0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -999,7 +999,8 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
+ if (!ring->is_mes_queue)
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1805,7 +1806,7 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine (NAVI10).
* Used by the amdgpu ttm implementation to move pages if
@@ -1815,11 +1816,11 @@ static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 42f4bd250def..cc9e961f0078 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -280,17 +280,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
- amdgpu_ring_write(ring, ref_and_mask); /* reference */
- amdgpu_ring_write(ring, ref_and_mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ if (ring->me > 1) {
+ amdgpu_asic_flush_hdp(adev, ring);
+ } else {
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, ref_and_mask); /* reference */
+ amdgpu_ring_write(ring, ref_and_mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ }
}
/**
@@ -835,7 +839,8 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
+ if (!ring->is_mes_queue)
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1747,7 +1752,7 @@ static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine.
* Used by the amdgpu ttm implementation to move pages if
@@ -1757,11 +1762,11 @@ static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 361835a61f2e..c833b6b8373b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -507,6 +507,13 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
/* set minor_ptr_update to 0 after wptr programmed */
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
+ /* Set up sdma hang watchdog */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
+ /* 100ms per unit */
+ temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
+			max(adev->usec_timeout / 100000, 1));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
+
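For scale: QUEUE_HANG_COUNT is in 100 ms units, so the division converts adev->usec_timeout from microseconds. With a configured timeout of 100000 usec (an assumption about the default) this programs 100000 / 100000 = 1 unit, i.e. a 100 ms watchdog, and the max(..., 1) clamp keeps a shorter timeout from writing a count of 0 and disabling the check.
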
/* Set up RESP_MODE to non-copy addresses */
temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
@@ -854,7 +861,8 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
+ if (!ring->is_mes_queue)
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1567,7 +1575,7 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: if a secure copy should be used
+ * @copy_flags: copy flags for the buffers
*
* Copy GPU buffers using the DMA engine.
* Used by the amdgpu ttm implementation to move pages if
@@ -1577,11 +1585,11 @@ static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 23e4ef4fff7c..85235470e872 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1409,9 +1409,9 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static bool si_asic_supports_baco(struct amdgpu_device *adev)
+static int si_asic_supports_baco(struct amdgpu_device *adev)
{
- return false;
+ return 0;
}
static enum amd_reset_method
@@ -2706,6 +2706,8 @@ static const struct amd_ip_funcs si_common_ip_funcs = {
.soft_reset = si_common_soft_reset,
.set_clockgating_state = si_common_set_clockgating_state,
.set_powergating_state = si_common_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version si_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 9aa0e11ee673..11db5b755832 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -708,6 +708,8 @@ static const struct amd_ip_funcs si_dma_ip_funcs = {
.soft_reset = si_dma_soft_reset,
.set_clockgating_state = si_dma_set_clockgating_state,
.set_powergating_state = si_dma_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
@@ -761,7 +763,7 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
- * @tmz: is this a secure operation
+ * @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (VI).
* Used by the amdgpu ttm implementation to move pages if
@@ -771,7 +773,7 @@ static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
- bool tmz)
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
1, 0, 0, byte_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index cada9f300a7f..5237395e4fab 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -296,6 +296,8 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
.soft_reset = si_ih_soft_reset,
.set_clockgating_state = si_ih_set_clockgating_state,
.set_powergating_state = si_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs si_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
index 93f6772d1b24..481217c32d85 100644
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -92,7 +92,7 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
- return r;
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c
new file mode 100644
index 000000000000..2a51a70d4846
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v14_0_2.h"
+#include "smuio/smuio_14_0_2_offset.h"
+#include "smuio/smuio_14_0_2_sh_mask.h"
+#include <linux/preempt.h>
+
+static u32 smuio_v14_0_2_get_rom_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, regROM_INDEX);
+}
+
+static u32 smuio_v14_0_2_get_rom_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, regROM_DATA);
+}
+
+static u64 smuio_v14_0_2_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+ u64 clock;
+ u64 clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
+
+ preempt_disable();
+ clock_counter_hi_pre = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
+ clock_counter_lo = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+	/* the clock counter may be updated while the counters are polled */
+ clock_counter_hi_after = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
+ if (clock_counter_hi_pre != clock_counter_hi_after)
+ clock_counter_lo = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+ preempt_enable();
+
+ clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
+
+ return clock;
+}
+
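The GOLDEN_TSC read sequence above is the standard split-counter pattern: if the upper half changed between the two reads, the lower half wrapped mid-read and must be re-read. A generic sketch of the technique, independent of the SMUIO registers (the accessors are hypothetical):

	/* Read a 64-bit counter exposed as two 32-bit halves without
	 * locking; the second hi read detects a wrap of the low half.
	 */
	static u64 read_split_counter(u32 (*rd_hi)(void), u32 (*rd_lo)(void))
	{
		u32 hi = rd_hi();
		u32 lo = rd_lo();
		u32 hi2 = rd_hi();

		if (hi != hi2)
			lo = rd_lo();	/* low half wrapped; re-read */

		return ((u64)hi2 << 32) | lo;
	}
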
+const struct amdgpu_smuio_funcs smuio_v14_0_2_funcs = {
+ .get_rom_index_offset = smuio_v14_0_2_get_rom_index_offset,
+ .get_rom_data_offset = smuio_v14_0_2_get_rom_data_offset,
+ .get_gpu_clock_counter = smuio_v14_0_2_get_gpu_clock_counter,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h
new file mode 100644
index 000000000000..6e617f832d90
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V14_0_2_H__
+#define __SMUIO_V14_0_2_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v14_0_2_funcs;
+
+#endif /* __SMUIO_V14_0_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index dec81ccf6240..170f02e96717 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -143,7 +143,7 @@ static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -156,7 +156,7 @@ static const struct amdgpu_video_codecs rn_video_codecs_decode =
static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
@@ -502,7 +502,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
- bool baco_reset = false;
+ int baco_reset = 0;
bool connected_to_cpu = false;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
@@ -540,7 +540,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
*/
if (ras && adev->ras_enabled &&
adev->pm.fw_version <= 0x283400)
- baco_reset = false;
+ baco_reset = 0;
} else {
baco_reset = amdgpu_dpm_is_baco_supported(adev);
}
@@ -620,7 +620,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
}
}
-static bool soc15_supports_baco(struct amdgpu_device *adev)
+static int soc15_supports_baco(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
@@ -628,13 +628,13 @@ static bool soc15_supports_baco(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20) {
if (adev->psp.sos.fw_version >= 0x80067)
return amdgpu_dpm_is_baco_supported(adev);
- return false;
+ return 0;
} else {
return amdgpu_dpm_is_baco_supported(adev);
}
break;
default:
- return false;
+ return 0;
}
}
@@ -1501,4 +1501,6 @@ static const struct amd_ip_funcs soc15_common_ip_funcs = {
.set_clockgating_state = soc15_common_set_clockgating_state,
.set_powergating_state = soc15_common_set_powergating_state,
.get_clockgating_state= soc15_common_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 1444b7765e4b..282584a48be0 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -88,6 +88,8 @@ struct soc15_ras_field_entry {
};
#define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg
+#define SOC15_REG_ENTRY_STR(ip, inst, reg) \
+ { ip##_HWIP, inst, reg##_BASE_IDX, reg, #reg }
#define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset)
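SOC15_REG_ENTRY_STR extends SOC15_REG_ENTRY with the stringized register name, so register-dump tables can carry a printable label next to the offset. For example (the register name here is hypothetical):

	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS)
	/* expands to */
	{ GC_HWIP, 0, regGRBM_STATUS_BASE_IDX, regGRBM_STATUS, "regGRBM_STATUS" }
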
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 581a3bd11481..fb6797467571 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -72,7 +72,7 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 = {
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
@@ -80,7 +80,7 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
- return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- return false;
default:
return true;
}
@@ -722,7 +720,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
- adev->external_rev_id = adev->rev_id + 0x1;
+ if (adev->rev_id == 0)
+ adev->external_rev_id = 0x1;
+ else
+ adev->external_rev_id = adev->rev_id + 0x10;
break;
case IP_VERSION(11, 5, 1):
adev->cg_flags =
@@ -869,10 +870,35 @@ static int soc21_common_suspend(void *handle)
return soc21_common_hw_fini(adev);
}
+static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg1, sol_reg2;
+
+	/* Will reset when both of the following hold:
+	 * 1) dGPU only (an APU is never reset here).
+	 * 2) The S3 suspend was aborted and the TOS is still active.
+	 */
+ if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
+ !adev->suspend_complete) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+ msleep(100);
+ sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+ return (sol_reg1 != sol_reg2);
+ }
+
+ return false;
+}
+
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc21_need_reset_on_resume(adev)) {
+		dev_info(adev->dev, "S3 suspend aborted, resetting...\n");
+ soc21_asic_reset(adev);
+ }
+
return soc21_common_hw_init(adev);
}
@@ -959,4 +985,6 @@ static const struct amd_ip_funcs soc21_common_ip_funcs = {
.set_clockgating_state = soc21_common_set_clockgating_state,
.set_powergating_state = soc21_common_set_powergating_state,
.get_clockgating_state = soc21_common_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 056d4df8fa1f..3ac56a9645eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -146,6 +146,7 @@ struct ta_ras_mca_addr {
uint32_t ch_inst;
uint32_t umc_inst;
uint32_t node_inst;
+ uint32_t socket_id;
};
struct ta_ras_phy_addr {
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 450b6e831509..24d49d813607 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -486,6 +486,8 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.post_soft_reset = tonga_ih_post_soft_reset,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 77af4e25ff46..bfe61d86ee6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -28,27 +28,7 @@
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"
-const uint32_t
- umc_v12_0_channel_idx_tbl[]
- [UMC_V12_0_UMC_INSTANCE_NUM]
- [UMC_V12_0_CHANNEL_INSTANCE_NUM] = {
- {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12},
- {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}},
- {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32},
- {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}},
- {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64},
- {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}},
- {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108},
- {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}}
- };
-
-/* mapping of MCA error address to normalized address */
-static const uint32_t umc_v12_0_ma2na_mapping[] = {
- 0, 5, 6, 8, 9, 14, 12, 13,
- 10, 11, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28,
- 24, 7, 29, 30,
-};
+#define MAX_ECC_NUM_PER_RETIREMENT 32
static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
uint32_t node_inst,
@@ -192,99 +172,74 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
-static bool umc_v12_0_bit_wise_xor(uint32_t val)
+static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ struct ta_ras_query_address_input *addr_in)
{
- bool result = 0;
- int i;
+ uint32_t col, row, row_xor, bank, channel_index;
+ uint64_t soc_pa, retired_page, column, err_addr;
+ struct ta_ras_query_address_output addr_out;
- for (i = 0; i < 32; i++)
- result = result ^ ((val >> i) & 0x1);
+ err_addr = addr_in->ma.err_addr;
+ addr_in->addr_type = TA_RAS_MCA_TO_PA;
+ if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
+		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx\n",
+ err_addr);
- return result;
-}
+ return;
+ }
+
+ soc_pa = addr_out.pa.pa;
+ bank = addr_out.pa.bank;
+ channel_index = addr_out.pa.channel_idx;
-static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev,
- uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst,
- uint32_t node_inst,
- struct ta_ras_query_address_output *addr_out)
-{
- uint32_t channel_index, i;
- uint64_t na, soc_pa;
- uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
- uint32_t bank0, bank1, bank2, bank3, bank;
-
- bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
- bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL;
- bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL;
- bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL;
col = (err_addr >> 1) & 0x1fULL;
row = (err_addr >> 10) & 0x3fffULL;
+ row_xor = row ^ (0x1ULL << 13);
+ /* clear [C3 C2] in soc physical address */
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
+ /* clear [C4] in soc physical address */
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
+ retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
+ /* include column bit 0 and 1 */
+ col &= 0x3;
+ col |= (column << 2);
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ retired_page, row, col, bank, channel_index);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, addr_in->ma.umc_inst);
- /* apply bank hash algorithm */
- bank0 =
- bank_hash0 ^ (UMC_V12_0_XOR_EN0 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0))));
- bank1 =
- bank_hash1 ^ (UMC_V12_0_XOR_EN1 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1))));
- bank2 =
- bank_hash2 ^ (UMC_V12_0_XOR_EN2 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2))));
- bank3 =
- bank_hash3 ^ (UMC_V12_0_XOR_EN3 &
- (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^
- (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3))));
-
- bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3);
- err_addr &= ~0x3c0ULL;
- err_addr |= (bank << UMC_V12_0_MCA_B0_BIT);
-
- na = 0x0;
- /* convert mca error address to normalized address */
- for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++)
- na |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i];
-
- channel_index =
- adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
- adev->umc.channel_inst_num +
- umc_inst * adev->umc.channel_inst_num +
- ch_inst];
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_32KB_BLOCK(na) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(na);
-
- /* the umc channel bits are not original values, they are hashed */
- UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
-
- addr_out->pa.pa = soc_pa;
- addr_out->pa.bank = bank;
- addr_out->pa.channel_idx = channel_index;
+ /* shift R13 bit */
+ retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ retired_page, row_xor, col, bank, channel_index);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, addr_in->ma.umc_inst);
+ }
}
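Each converted MCA address therefore fans out into 2 * UMC_V12_0_NA_MAP_PA_NUM retired pages: one per [C4 C3 C2] combination plus the R13-flipped row for each (16 in total if the map size is 8, an assumption here); the pos/len checks in umc_v12_0_convert_err_addr() below bound that fan-out against the caller's pfns buffer.
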
-static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
- struct ras_err_data *err_data, uint64_t err_addr,
- uint32_t ch_inst, uint32_t umc_inst,
- uint32_t node_inst)
+static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
+ struct ta_ras_query_address_input *addr_in,
+ uint64_t *pfns, int len)
{
uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column;
- struct ta_ras_query_address_input addr_in;
+ uint64_t soc_pa, retired_page, column, err_addr;
struct ta_ras_query_address_output addr_out;
+ uint32_t pos = 0;
- addr_in.addr_type = TA_RAS_MCA_TO_PA;
- addr_in.ma.err_addr = err_addr;
- addr_in.ma.ch_inst = ch_inst;
- addr_in.ma.umc_inst = umc_inst;
- addr_in.ma.node_inst = node_inst;
-
- if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out))
- /* fallback to old path if fail to get pa from psp */
- umc_v12_0_mca_addr_to_pa(adev, err_addr, ch_inst, umc_inst,
- node_inst, &addr_out);
+ err_addr = addr_in->ma.err_addr;
+ addr_in->addr_type = TA_RAS_MCA_TO_PA;
+ if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
+		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx\n",
+ err_addr);
+ return 0;
+ }
soc_pa = addr_out.pa.pa;
bank = addr_out.pa.bank;
@@ -302,33 +257,42 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
+
+ if (pos >= len)
+ return 0;
+ pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+
/* include column bit 0 and 1 */
col &= 0x3;
col |= (column << 2);
dev_info(adev->dev,
"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
retired_page, row, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
/* shift R13 bit */
retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
+
+ if (pos >= len)
+ return 0;
+ pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+
dev_info(adev->dev,
"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
retired_page, row_xor, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
}
+
+ return pos;
}
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ struct ta_ras_query_address_input addr_in;
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
uint64_t mc_umc_addrt0;
- struct ras_err_data *err_data = (struct ras_err_data *)data;
uint64_t umc_reg_offset =
get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);
@@ -357,8 +321,19 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- umc_v12_0_convert_error_address(adev, err_data, err_addr,
- ch_inst, umc_inst, node_inst);
+ if (!adev->aid_mask &&
+ adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id)
+ addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
+ else
+ addr_in.ma.socket_id = 0;
+
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch_inst;
+ addr_in.ma.umc_inst = umc_inst;
+ addr_in.ma.node_inst = node_inst;
+
+ umc_v12_0_convert_error_address(adev, err_data, &addr_in);
}
/* clear umc status */
@@ -401,13 +376,20 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
return 0;
}
+#ifdef TO_BE_REMOVED
static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
+ struct ras_query_context qctx;
+
+ memset(&qctx, 0, sizeof(qctx));
+ qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
+ RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);
+
amdgpu_mca_smu_log_ras_error(adev,
- AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status);
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status, &qctx);
amdgpu_mca_smu_log_ras_error(adev,
- AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status);
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status, &qctx);
}
static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
@@ -418,12 +400,16 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
struct ras_err_info *err_info;
struct ras_err_addr *mca_err_addr, *tmp;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct ta_ras_query_address_input addr_in;
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
if (list_empty(&err_info->err_addr_list))
continue;
+ addr_in.ma.node_inst = err_info->mcm_info.die_id;
+ addr_in.ma.socket_id = err_info->mcm_info.socket_id;
+
list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) {
mc_umc_status = mca_err_addr->err_status;
if (mc_umc_status &&
@@ -439,6 +425,10 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = MCA_IPID_LO_2_UMC_CH(InstanceIdLo);
+ addr_in.ma.umc_inst = MCA_IPID_LO_2_UMC_INST(InstanceIdLo);
+
dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
mca_ipid,
err_info->mcm_info.die_id,
@@ -447,10 +437,7 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
err_addr);
umc_v12_0_convert_error_address(adev,
- err_data, err_addr,
- MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
- MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
- err_info->mcm_info.die_id);
+ err_data, &addr_in);
}
/* Delete error address node from list and free memory */
@@ -458,6 +445,7 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
}
}
}
+#endif
static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, void *ras_error_status)
@@ -498,43 +486,49 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
.query_ras_error_address = umc_v12_0_query_ras_error_address,
};
-static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
- struct aca_bank_report *report, void *data)
+static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
{
struct amdgpu_device *adev = handle->adev;
- u64 status;
+ struct aca_bank_info info;
+ enum aca_error_type err_type;
+ u64 status, count;
+ u32 ext_error_code;
int ret;
- ret = aca_bank_info_decode(bank, &report->info);
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ if (umc_v12_0_is_deferred_error(adev, status))
+ err_type = ACA_ERROR_TYPE_DEFERRED;
+ else if (umc_v12_0_is_uncorrectable_error(adev, status))
+ err_type = ACA_ERROR_TYPE_UE;
+ else if (umc_v12_0_is_correctable_error(adev, status))
+ err_type = ACA_ERROR_TYPE_CE;
+ else
+ return 0;
+
+ ret = aca_bank_info_decode(bank, &info);
if (ret)
return ret;
- status = bank->regs[ACA_REG_IDX_STATUS];
- switch (type) {
- case ACA_ERROR_TYPE_UE:
- if (umc_v12_0_is_uncorrectable_error(adev, status)) {
- report->count[type] = 1;
- }
- break;
- case ACA_ERROR_TYPE_CE:
- if (umc_v12_0_is_correctable_error(adev, status)) {
- report->count[type] = 1;
- }
- break;
- default:
- return -EINVAL;
- }
+ amdgpu_umc_update_ecc_status(adev,
+ bank->regs[ACA_REG_IDX_STATUS],
+ bank->regs[ACA_REG_IDX_IPID],
+ bank->regs[ACA_REG_IDX_ADDR]);
- return 0;
+ ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
+ count = ext_error_code == 0 ?
+ ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
+
+ return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
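The parser above derives the error type from the bank's STATUS register instead of receiving it from the caller, and the test order matters: a deferred (poison) bank can also satisfy the UE predicate, so deferred is checked first. A minimal sketch of that classification, assuming this file's umc_v12_0_is_*_error() helpers (the -1 sentinel is illustrative, not a driver constant):

```c
/* Sketch of the classification order in umc_v12_0_aca_bank_parser(). */
static int classify_bank_status(struct amdgpu_device *adev, u64 status)
{
	if (umc_v12_0_is_deferred_error(adev, status))	/* poison first */
		return ACA_ERROR_TYPE_DEFERRED;
	if (umc_v12_0_is_uncorrectable_error(adev, status))
		return ACA_ERROR_TYPE_UE;
	if (umc_v12_0_is_correctable_error(adev, status))
		return ACA_ERROR_TYPE_CE;
	return -1;	/* nothing countable in this bank */
}
```

For the count, a non-zero extended error code in STATUS forces a count of 1; otherwise the MISC0 register's ERRCNT field is used, as in the body above.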
static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
- .aca_bank_generate_report = umc_v12_0_aca_bank_generate_report,
+ .aca_bank_parser = umc_v12_0_aca_bank_parser,
};
const struct aca_info umc_v12_0_aca_info = {
.hwip = ACA_HWIP_TYPE_UMC,
- .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
.bank_ops = &umc_v12_0_aca_bank_ops,
};
@@ -554,6 +548,152 @@ static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
return 0;
}
+static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ uint16_t hwid, mcatype;
+ struct ta_ras_query_address_input addr_in;
+ uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
+ uint64_t err_addr, hash_val = 0;
+ struct ras_ecc_err *ecc_err;
+ int count;
+ int ret;
+
+ hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
+ mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
+
+ if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
+ return 0;
+
+ if (!status)
+ return 0;
+
+ if (!umc_v12_0_is_deferred_error(adev, status))
+ return 0;
+
+ err_addr = REG_GET_FIELD(addr,
+ MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ dev_info(adev->dev,
+ "UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n",
+ ipid,
+ MCA_IPID_2_SOCKET_ID(ipid),
+ MCA_IPID_2_DIE_ID(ipid),
+ MCA_IPID_2_UMC_INST(ipid),
+ MCA_IPID_2_UMC_CH(ipid),
+ err_addr);
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid);
+ addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid);
+ addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid);
+ addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid);
+
+ count = umc_v12_0_convert_err_addr(adev,
+ &addr_in, page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
+ return 0;
+ }
+
+ ret = amdgpu_umc_build_pages_hash(adev,
+ page_pfn, count, &hash_val);
+ if (ret) {
+ dev_err(adev->dev, "Fail to build error pages hash\n");
+ return ret;
+ }
+
+ ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
+ if (!ecc_err)
+ return -ENOMEM;
+
+ ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL);
+ if (!ecc_err->err_pages.pfn) {
+ kfree(ecc_err);
+ return -ENOMEM;
+ }
+
+ memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn));
+ ecc_err->err_pages.count = count;
+
+ ecc_err->hash_index = hash_val;
+ ecc_err->status = status;
+ ecc_err->ipid = ipid;
+ ecc_err->addr = addr;
+
+ ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
+ if (ret) {
+ if (ret == -EEXIST)
+ con->umc_ecc_log.de_updated = true;
+ else
+ dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);
+
+ kfree(ecc_err->err_pages.pfn);
+ kfree(ecc_err);
+ return ret;
+ }
+
+ con->umc_ecc_log.de_updated = true;
+
+ return 0;
+}
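The helper above only acts on deferred UMC errors: it converts the MCA error address into the set of physical pages to retire, hashes that set, and logs it into a per-device tree keyed by the hash, so repeated reports of the same pages collapse into a single entry. A condensed sketch of that de-dup step, reusing the patch's helpers and names:

```c
/* Condensed de-dup step: -EEXIST means the same page set was logged
 * earlier, which still counts as new data for the retirement worker. */
ret = amdgpu_umc_build_pages_hash(adev, page_pfn, count, &hash_val);
if (!ret)
	ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
if (!ret || ret == -EEXIST)
	con->umc_ecc_log.de_updated = true;
```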
+
+static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
+ struct ras_ecc_err *ecc_err, void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t i = 0;
+ int ret = 0;
+
+ if (!err_data || !ecc_err)
+ return -EINVAL;
+
+ for (i = 0; i < ecc_err->err_pages.count; i++) {
+ ret = amdgpu_umc_fill_error_record(err_data,
+ ecc_err->addr,
+ ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
+ MCA_IPID_2_UMC_CH(ecc_err->ipid),
+ MCA_IPID_2_UMC_INST(ecc_err->ipid));
+ if (ret)
+ break;
+ }
+
+ err_data->de_count++;
+
+ return ret;
+}
+
+static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT];
+ struct radix_tree_root *ecc_tree;
+ int new_detected, ret, i;
+
+ ecc_tree = &con->umc_ecc_log.de_page_tree;
+
+ mutex_lock(&con->umc_ecc_log.lock);
+ new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries,
+ 0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG);
+ for (i = 0; i < new_detected; i++) {
+ if (!entries[i])
+ continue;
+
+ ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status);
+ if (ret) {
+ dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret);
+ break;
+ }
+ radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG);
+ }
+ mutex_unlock(&con->umc_ecc_log.lock);
+}
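The retirement pass is built on the kernel radix tree's tag machinery: entries are tagged UMC_ECC_NEW_DETECTED_TAG when logged, gang-looked-up here, and untagged once their pages are recorded, so the next pass only sees genuinely new errors. A bare sketch of the idiom, with the patch's tree, tag and entry type, and locking omitted; consume() is a placeholder for filling the error record:

```c
struct ras_ecc_err *hits[MAX_ECC_NUM_PER_RETIREMENT];
int n, i;

n = radix_tree_gang_lookup_tag(ecc_tree, (void **)hits, 0,
			       ARRAY_SIZE(hits), UMC_ECC_NEW_DETECTED_TAG);
for (i = 0; i < n; i++) {
	consume(hits[i]);	/* placeholder: build the error record */
	radix_tree_tag_clear(ecc_tree, hits[i]->hash_index,
			     UMC_ECC_NEW_DETECTED_TAG);
}
```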
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -561,8 +701,8 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
},
.err_cnt_init = umc_v12_0_err_cnt_init,
.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
- .ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
- .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
+ .ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
+ .update_ecc_status = umc_v12_0_update_ecc_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index 5973bfb14fce..b4974793850b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,83 +55,38 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
-/* bank bits in MCA error address */
-#define UMC_V12_0_MCA_B0_BIT 6
-#define UMC_V12_0_MCA_B1_BIT 7
-#define UMC_V12_0_MCA_B2_BIT 8
-#define UMC_V12_0_MCA_B3_BIT 9
+
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
#define UMC_V12_0_PA_R13_BIT 35
-/* channel index bits in SOC physical address */
-#define UMC_V12_0_PA_CH4_BIT 12
-#define UMC_V12_0_PA_CH5_BIT 13
-#define UMC_V12_0_PA_CH6_BIT 14
-
-/* bank hash settings */
-#define UMC_V12_0_XOR_EN0 1
-#define UMC_V12_0_XOR_EN1 1
-#define UMC_V12_0_XOR_EN2 1
-#define UMC_V12_0_XOR_EN3 1
-#define UMC_V12_0_COL_XOR0 0x0
-#define UMC_V12_0_COL_XOR1 0x0
-#define UMC_V12_0_COL_XOR2 0x800
-#define UMC_V12_0_COL_XOR3 0x1000
-#define UMC_V12_0_ROW_XOR0 0x11111
-#define UMC_V12_0_ROW_XOR1 0x22222
-#define UMC_V12_0_ROW_XOR2 0x4444
-#define UMC_V12_0_ROW_XOR3 0x8888
-
-/* channel hash settings */
-#define UMC_V12_0_HASH_4K 0
-#define UMC_V12_0_HASH_64K 1
-#define UMC_V12_0_HASH_2M 1
-#define UMC_V12_0_HASH_1G 1
-#define UMC_V12_0_HASH_1T 1
-
-/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA),
- * hash bit is only effective when related setting is enabled
- */
-#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \
- (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
- (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
- (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
- (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T))
-#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \
- (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
- (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
- (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
- (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T))
-#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \
- (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
- (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
- (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
- (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \
- (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_4K))
-#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \
- (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \
- (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \
- (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \
- (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
- } while (0)
+
+#define MCA_UMC_HWID_V12_0 0x96
+#define MCA_UMC_MCATYPE_V12_0 0x0
#define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \
(((_ipid_lo) >> 12) & 0xF))
#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+#define MCA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03)
+
+#define MCA_IPID_2_UMC_CH(ipid) \
+ (MCA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define MCA_IPID_2_UMC_INST(ipid) \
+ (MCA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define MCA_IPID_2_SOCKET_ID(ipid) \
+ (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
+ (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+
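The new MCA_IPID_2_* macros unpack socket, die, UMC instance and channel directly from the IPID register's InstanceIdLo/InstanceIdHi fields. A standalone illustration of the same bit arithmetic, where instance_id_lo/hi stand in for the REG_GET_FIELD() results and the sample values are made up:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t instance_id_lo = 0x123456;	/* hypothetical field value */
	uint64_t instance_id_hi = 0x6;		/* hypothetical field value */

	unsigned ch   = (((instance_id_lo >> 20) & 0x1) * 4) +
			((instance_id_lo >> 12) & 0xF);
	unsigned inst = (instance_id_lo >> 21) & 0x7;
	unsigned die  = (instance_id_hi >> 2) & 0x03;
	unsigned sock = ((instance_id_lo & 0x1) << 2) |
			(instance_id_hi & 0x03);

	printf("socket %u, die %u, umc_inst %u, ch %u\n", sock, die, inst, ch);
	return 0;
}
```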
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status);
-extern const uint32_t
- umc_v12_0_channel_idx_tbl[]
- [UMC_V12_0_UMC_INSTANCE_NUM]
- [UMC_V12_0_CHANNEL_INSTANCE_NUM];
-
extern struct amdgpu_umc_ras umc_v12_0_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index c4c77257710c..a32f87992f20 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -442,11 +442,6 @@ static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
umc_v8_10_ecc_info_query_error_address, ras_error_status);
}
-static void umc_v8_10_set_eeprom_table_version(struct amdgpu_ras_eeprom_table_header *hdr)
-{
- hdr->version = RAS_TABLE_VER_V2_1;
-}
-
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -460,5 +455,4 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address,
- .set_eeprom_table_version = umc_v8_10_set_eeprom_table_version,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
index 8e7b763cfdb7..bd57896ab85d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
@@ -60,7 +60,7 @@ static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr;
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
+ ring->wptr = 0;
+
data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);
@@ -248,7 +250,7 @@ static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch)
data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0);
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -271,6 +273,8 @@ static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)
set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn;
set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe;
+ set_hw_resources.collaboration_mask_vpe =
+ adev->vpe.collaborate_mode ? 0x3 : 0x0;
set_hw_resources.engine_mask = umsch->engine_mask;
set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask;
@@ -346,6 +350,7 @@ static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch,
add_queue.h_queue = input_ptr->h_queue;
add_queue.vm_context_cntl = input_ptr->vm_context_cntl;
add_queue.is_context_suspended = input_ptr->is_context_suspended;
+ add_queue.collaboration_mode = adev->vpe.collaborate_mode ? 1 : 0;
add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index a6006f231c65..805d6662c88b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -819,6 +819,8 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
.soft_reset = uvd_v3_1_soft_reset,
.set_clockgating_state = uvd_v3_1_set_clockgating_state,
.set_powergating_state = uvd_v3_1_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 1aa09ad7bbe3..3f19c606f4de 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -769,6 +769,8 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.soft_reset = uvd_v4_2_soft_reset,
.set_clockgating_state = uvd_v4_2_set_clockgating_state,
.set_powergating_state = uvd_v4_2_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index f8b229b75435..efd903c21d48 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -877,6 +877,8 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.set_clockgating_state = uvd_v5_0_set_clockgating_state,
.set_powergating_state = uvd_v5_0_set_powergating_state,
.get_clockgating_state = uvd_v5_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index a9a6880f44e3..495de5068455 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1545,6 +1545,8 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
.get_clockgating_state = uvd_v6_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index a08e7abca423..66fada199bda 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -626,6 +626,8 @@ static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.soft_reset = vce_v2_0_soft_reset,
.set_clockgating_state = vce_v2_0_set_clockgating_state,
.set_powergating_state = vce_v2_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index f4760748d349..32517c364cf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -913,6 +913,8 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
.get_clockgating_state = vce_v3_0_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index aaceecd558cf..cb253bd3a2a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1902,6 +1902,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
.set_powergating_state = vcn_v1_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index e357d8cf0c01..f18fd61c435e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -2008,6 +2008,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_v2_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 1cd8a94b0fbc..baec14bde2a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1901,6 +1901,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -1921,6 +1923,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 8f82fb887e9c..6b31cf4b8aac 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -359,6 +359,7 @@ static int vcn_v3_0_hw_init(void *handle)
}
}
+ return 0;
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -2230,6 +2231,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_v3_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 832d15f7b5f6..ac1b8ead03b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -288,6 +288,7 @@ static int vcn_v4_0_hw_init(void *handle)
}
}
+ return 0;
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -2130,6 +2131,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_v4_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 203fa988322b..2279d8fce03d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -1660,6 +1660,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_v4_0_3_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 501e53e69f2a..81fb99729f37 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -237,6 +237,7 @@ static int vcn_v4_0_5_hw_init(void *handle)
goto done;
}
+ return 0;
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -1752,6 +1753,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_v4_0_5_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index bc60c554eb32..851975b5ce29 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -95,7 +95,7 @@ static int vcn_v5_0_0_sw_init(void *handle)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -154,7 +154,7 @@ static int vcn_v5_0_0_sw_fini(void *handle)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -203,6 +203,7 @@ static int vcn_v5_0_0_hw_init(void *handle)
goto done;
}
+ return 0;
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -334,7 +335,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}
/**
@@ -438,7 +439,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
@@ -615,7 +616,7 @@ static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
*/
static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
@@ -712,7 +713,7 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
*/
static int vcn_v5_0_0_start(struct amdgpu_device *adev)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int i, j, k, r;
@@ -893,7 +894,7 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
*/
static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int i, r = 0;
@@ -1328,6 +1329,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_v5_0_0_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 1a98812981f4..d39c670f6220 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -897,7 +897,7 @@ static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static bool vi_asic_supports_baco(struct amdgpu_device *adev)
+static int vi_asic_supports_baco(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
@@ -908,14 +908,14 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
case CHIP_TOPAZ:
return amdgpu_dpm_is_baco_supported(adev);
default:
- return false;
+ return 0;
}
}
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
- bool baco_reset;
+ int baco_reset;
if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO)
@@ -935,7 +935,7 @@ vi_asic_reset_method(struct amdgpu_device *adev)
baco_reset = amdgpu_dpm_is_baco_supported(adev);
break;
default:
- baco_reset = false;
+ baco_reset = 0;
break;
}
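The bool-to-int widening matters because the underlying DPM query can fail; as a bool, a negative errno would silently read as "supported". With an int return, the error case stays distinguishable. An illustrative caller, under that assumption:

```c
/* Three-way outcome: < 0 error, 0 unsupported, > 0 supported. */
bool use_baco = false;
int baco = vi_asic_supports_baco(adev);

if (baco < 0)
	dev_warn(adev->dev, "BACO support query failed: %d\n", baco);
use_baco = baco > 0;	/* only a strictly positive result enables BACO */
```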
@@ -2058,6 +2058,8 @@ static const struct amd_ip_funcs vi_common_ip_funcs = {
.set_clockgating_state = vi_common_set_clockgating_state,
.set_powergating_state = vi_common_set_powergating_state,
.get_clockgating_state = vi_common_get_clockgating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version vi_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
index 769eb8f7bb3c..09315dd5a1ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -144,6 +144,12 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
}
+ /* setup collaborate mode */
+ vpe_v6_1_set_collaborate_mode(vpe, true);
+ /* setup DPM */
+ if (amdgpu_vpe_configure_dpm(vpe))
+ dev_warn(adev->dev, "VPE failed to enable DPM\n");
+
/*
* For VPE 6.1.1, still only need to add master's offset, and psp will apply it to slave as well.
* Here use instance 0 as master.
@@ -159,11 +165,7 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
adev->vpe.cmdbuf_cpu_addr[0] = f32_offset;
adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
- amdgpu_vpe_psp_update_sram(adev);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
-
- return 0;
+ return amdgpu_vpe_psp_update_sram(adev);
}
vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
@@ -196,8 +198,6 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
}
vpe_v6_1_halt(vpe, false);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index dfa8c69532d4..6b713fb0b818 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -371,6 +371,11 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
err = -EINVAL;
goto err_wptr_map_gart;
}
+ if (dev->adev != amdgpu_ttm_adev(wptr_bo->tbo.bdev)) {
+ pr_err("Queue memory allocated to wrong device\n");
+ err = -EINVAL;
+ goto err_wptr_map_gart;
+ }
err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo);
if (err) {
@@ -779,8 +784,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user
*/
- pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
- args->num_of_nodes), GFP_KERNEL);
+ pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
+ GFP_KERNEL);
if (!pa)
return -ENOMEM;
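kcalloc() is the overflow-safe spelling of an array allocation: it rejects n * size products that would wrap before allocating, and zeroes the result just as kzalloc() did. Side by side:

```c
/* Before: unchecked multiplication inside kzalloc(). */
pa = kzalloc(sizeof(struct kfd_process_device_apertures) *
	     args->num_of_nodes, GFP_KERNEL);

/* After: kcalloc() checks the product for overflow first. */
pa = kcalloc(args->num_of_nodes,
	     sizeof(struct kfd_process_device_apertures), GFP_KERNEL);
```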
@@ -1523,7 +1528,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
/* Find a KFD GPU device that supports the get_dmabuf_info query */
for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
- if (dev)
+ if (dev && !kfd_devcgroup_check_permission(dev))
break;
if (!dev)
return -EINVAL;
@@ -1545,7 +1550,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
if (xcp_id >= 0)
args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
else
- args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
+ args->gpu_id = dev->id;
args->flags = flags;
/* Copy metadata buffer to user mode */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 041ec3de55e7..9596bca57212 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -435,12 +435,12 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
if (!f2g) {
if (amdgpu_ip_version(adev, GC_HWIP, 0))
- dev_err(kfd_device,
+ dev_info(kfd_device,
"GC IP %06x %s not supported in kfd\n",
amdgpu_ip_version(adev, GC_HWIP, 0),
vf ? "VF" : "");
else
- dev_err(kfd_device, "%s %s not supported in kfd\n",
+ dev_info(kfd_device, "%s %s not supported in kfd\n",
amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
return NULL;
}
@@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
struct kfd_node *node;
int i;
- int count;
if (!kfd->init_complete)
return;
@@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
- count = ++kfd_locked;
- mutex_unlock(&kfd_processes_mutex);
-
/* For first KFD device suspend all the KFD processes */
- if (count == 1)
+ if (++kfd_locked == 1)
kfd_suspend_all_processes();
+ mutex_unlock(&kfd_processes_mutex);
}
for (i = 0; i < kfd->num_nodes; i++) {
@@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
- int ret, count, i;
+ int ret, i;
if (!kfd->init_complete)
return 0;
@@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
- count = --kfd_locked;
- mutex_unlock(&kfd_processes_mutex);
-
- WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
- if (count == 0)
+ if (--kfd_locked == 0)
ret = kfd_resume_all_processes();
+ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+ mutex_unlock(&kfd_processes_mutex);
}
return ret;
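Both hunks close the same race: the old code dropped kfd_processes_mutex between updating kfd_locked and acting on the resulting count, so two concurrent suspend/resume paths could each observe the "first" or "last" value. The shape of the fix, sketched for the suspend side with the patch's names:

```c
/* Counter update and first-suspender decision now share one
 * critical section. */
mutex_lock(&kfd_processes_mutex);
if (++kfd_locked == 1)		/* only the first suspender stops processes */
	kfd_suspend_all_processes();
mutex_unlock(&kfd_processes_mutex);
```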
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f4d395e38683..c08b6ee25289 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1997,10 +1997,10 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
* check those fields
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
- if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
- dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
+ if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
while (halt_if_hws_hang)
schedule();
+ kfd_hws_hang(dqm);
return -ETIME;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 9a06c6fb6605..8e0d0356e810 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -134,6 +134,7 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
{
enum amdgpu_ras_block block = 0;
int old_poison, ret = -EINVAL;
+ uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
@@ -153,6 +154,8 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
+ if (ret)
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
@@ -160,6 +163,7 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
block = AMDGPU_RAS_BLOCK__SDMA;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
default:
break;
@@ -170,17 +174,16 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
/* resetting queue passes, do page retirement without gpu reset
* resetting queue fails, fallback to gpu reset solution
*/
- if (!ret) {
+ if (!ret)
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
- } else {
+ else
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
- }
+
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
}
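The refactor replaces the boolean "needs reset" argument with an explicit reset mode chosen per client: GFX poison only escalates to a MODE2 reset when queue eviction failed, while SDMA poison always requests one. A condensed view of the decision table in the v10 handler above:

```c
uint32_t reset = 0;	/* 0 = page retirement only, no GPU reset */

if (block == AMDGPU_RAS_BLOCK__GFX && ret)	/* eviction failed */
	reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
else if (block == AMDGPU_RAS_BLOCK__SDMA)
	reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;

amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
```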
static bool event_interrupt_isr_v10(struct kfd_node *dev,
@@ -339,7 +342,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
break;
}
kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
@@ -367,10 +371,25 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
+ int hub_inst = 0;
struct kfd_hsa_memory_exception_data exception_data;
- if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
- amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+ /* gfxhub */
+ if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
+ hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
+ node_id);
+ if (hub_inst < 0)
+ hub_inst = 0;
+ }
+
+ /* mmhub */
+ if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
+ hub_inst = node_id / 4;
+
+ if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
+ hub_inst, vmid_type)) {
event_interrupt_poison_consumption(dev, pasid, client_id);
return;
}
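The poison-status query now needs to know which translation hub raised the fault. The vmid_type bit from the IH entry selects between gfxhub (node id translated to a logical XCC) and mmhub (four IH node ids per instance). Consolidated from the interleaved branches above:

```c
int hub_inst = 0;

if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
	/* gfxhub: map the IH node id onto a logical XCC */
	hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
								node_id);
	if (hub_inst < 0)
		hub_inst = 0;
} else if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC) {
	/* mmhub: four IH node ids share one MMHUB instance */
	hub_inst = node_id / 4;
}
```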
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index 7e2859736a55..f524a55eee11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -193,6 +193,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
{
enum amdgpu_ras_block block = 0;
int ret = -EINVAL;
+ uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
@@ -212,10 +213,13 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
if (dev->dqm->ops.reset_queues)
ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
+ if (ret)
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
case SOC21_INTSRC_SDMA_ECC:
default:
block = AMDGPU_RAS_BLOCK__GFX;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
}
@@ -223,10 +227,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
/* resetting queue passes, do page retirement without gpu reset
resetting queue fails, fallback to gpu reset solution */
- if (!ret)
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
- else
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
}
static bool event_interrupt_isr_v11(struct kfd_node *dev,
@@ -328,7 +329,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
/* CP */
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)))
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 91dd5e045b51..e1c21d250611 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -144,7 +144,8 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
enum amdgpu_ras_block block = 0;
- int old_poison, ret = -EINVAL;
+ int old_poison;
+ uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
@@ -162,8 +163,13 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE2SH:
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
- ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ break;
+ case SOC15_IH_CLIENTID_VMC:
+ case SOC15_IH_CLIENTID_VMC1:
+ block = AMDGPU_RAS_BLOCK__MMHUB;
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
@@ -171,27 +177,21 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
block = AMDGPU_RAS_BLOCK__SDMA;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
default:
- break;
+ dev_warn(dev->adev->dev,
+ "client %d does not support poison consumption\n", client_id);
+ return;
}
kfd_signal_poison_consumed_event(dev, pasid);
- /* resetting queue passes, do page retirement without gpu reset
- * resetting queue fails, fallback to gpu reset solution
- */
- if (!ret) {
- dev_warn(dev->adev->dev,
- "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
- client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
- } else {
- dev_warn(dev->adev->dev,
- "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
- client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
- }
+ dev_warn(dev->adev->dev,
+ "poison is consumed by client %d, kick off gpu reset flow\n", client_id);
+
+ amdgpu_amdkfd_ras_pasid_poison_consumption_handler(dev->adev,
+ block, pasid, NULL, NULL, reset);
}
static bool context_id_expected(struct kfd_dev *dev)
@@ -388,7 +388,8 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
break;
}
kfd_signal_event_interrupt(pasid, sq_int_data, 24);
- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
@@ -413,10 +414,25 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
+ uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
+ int hub_inst = 0;
struct kfd_hsa_memory_exception_data exception_data;
- if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
- amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+ /* gfxhub */
+ if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
+ hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
+ node_id);
+ if (hub_inst < 0)
+ hub_inst = 0;
+ }
+
+ /* mmhub */
+ if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
+ hub_inst = node_id / 4;
+
+ if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
+ hub_inst, vmid_type)) {
event_interrupt_poison_consumption_v9(dev, pasid, client_id);
return;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index dd3c43c1ad70..9b6b6e882593 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -104,6 +104,8 @@ void kfd_interrupt_exit(struct kfd_node *node)
*/
flush_workqueue(node->ih_wq);
+ destroy_workqueue(node->ih_wq);
+
kfifo_free(&node->ih_fifo);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index bdc01ca9609a..4bcfbeac48fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -77,7 +77,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
- dst_addr, num_bytes, false);
+ dst_addr, num_bytes, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
@@ -153,7 +153,7 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
}
r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
- NULL, &next, false, true, false);
+ NULL, &next, false, true, 0);
if (r) {
dev_err(adev->dev, "fail %d to copy memory\n", r);
goto out_unlock;
@@ -509,10 +509,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
+ r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
+ if (r) {
+ dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
+ return -ENOSPC;
+ }
+
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
- return r;
+ goto out;
}
ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
@@ -545,6 +554,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
svm_range_vram_node_free(prange);
}
+out:
+ amdgpu_amdkfd_unreserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
return r < 0 ? r : 0;
}
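The migration path now brackets the whole copy with an explicit VRAM budget reservation, and the unreserve at "out" runs on every exit, so a failed allocation or partial copy cannot leak accounted memory. The skeleton, with migrate_pages_now() as a placeholder for the actual copy steps:

```c
r = amdgpu_amdkfd_reserve_mem_limit(node->adev, prange->npages * PAGE_SIZE,
				    KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
				    node->xcp ? node->xcp->id : 0);
if (r)
	return -ENOSPC;

r = migrate_pages_now(prange);		/* may fail partway through */

amdgpu_amdkfd_unreserve_mem_limit(node->adev, prange->npages * PAGE_SIZE,
				  KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
				  node->xcp ? node->xcp->id : 0);
return r < 0 ? r : 0;
```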
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 050a6936ff84..8746a61a852d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -290,3 +290,21 @@ uint64_t kfd_mqd_stride(struct mqd_manager *mm,
{
return mm->mqd_size;
}
+
+bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id,
+ uint32_t inst)
+{
+ if (doorbell_id) {
+ struct device *dev = node->adev->dev;
+
+ if (node->adev->xcp_mgr && node->adev->xcp_mgr->num_xcps > 0)
+ dev_err(dev, "XCC %d: Queue preemption failed for queue with doorbell_id: %x\n",
+ inst, doorbell_id);
+ else
+ dev_err(dev, "Queue preemption failed for queue with doorbell_id: %x\n",
+ doorbell_id);
+ return true;
+ }
+
+ return false;
+}
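The helper centralizes the check that the old per-ASIC read_doorbell_id() callbacks open-coded: a non-zero queue_doorbell_id0 left in the HIQ MQD means firmware failed to preempt that queue, and on multi-partition parts the message names the XCC. A typical call site, where hiq_mqd stands in for the HIQ's MQD pointer:

```c
/* True means preemption timed out; treat it as a HWS hang. */
if (mqd_mgr->check_preemption_failed(mqd_mgr, hiq_mqd))
	return -ETIME;
```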
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index e5cc697a3ca8..17cc1f25c8d0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -119,7 +119,7 @@ struct mqd_manager {
#if defined(CONFIG_DEBUG_FS)
int (*debugfs_show_mqd)(struct seq_file *m, void *data);
#endif
- uint32_t (*read_doorbell_id)(void *mqd);
+ bool (*check_preemption_failed)(struct mqd_manager *mm, void *mqd);
uint64_t (*mqd_stride)(struct mqd_manager *mm,
struct queue_properties *p);
@@ -198,4 +198,6 @@ void kfd_get_hiq_xcc_mqd(struct kfd_node *dev,
uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev);
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
struct queue_properties *q);
+bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id,
+ uint32_t inst);
#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 1a4a69943c71..05f3ac2eaef9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -206,11 +206,11 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct cik_mqd *m = (struct cik_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
@@ -423,7 +423,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
+ mqd->check_preemption_failed = check_preemption_failed;
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 22cbfa1bdadd..2eff37aaf827 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -224,11 +224,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct v10_compute_mqd *m = (struct v10_compute_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
@@ -488,7 +488,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
+ mqd->check_preemption_failed = check_preemption_failed;
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_DIQ:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 826bc4f6c8a7..68dbc0399c87 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -278,11 +278,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
@@ -517,7 +517,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
+ mqd->check_preemption_failed = check_preemption_failed;
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_DIQ:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 697b6d530d12..6bddc16808d7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -316,11 +316,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct v9_mqd *m = (struct v9_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
@@ -607,6 +607,24 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
return err;
}
+static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd)
+{
+ uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int inst = 0, xcc_id;
+ struct v9_mqd *m;
+ bool ret = false;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ m = get_mqd(mqd + hiq_mqd_size * inst);
+ ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev,
+ m->queue_doorbell_id0, inst);
+ ++inst;
+ }
+
+ return ret;
+}
+
static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj,
struct kfd_mem_obj *xcc_mqd_mem_obj,
uint64_t offset)
@@ -881,15 +899,16 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
mqd->init_mqd = init_mqd_hiq_v9_4_3;
mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
+ mqd->check_preemption_failed = check_preemption_failed_v9_4_3;
} else {
mqd->init_mqd = init_mqd_hiq;
mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->destroy_mqd = destroy_hiq_mqd;
+ mqd->check_preemption_failed = check_preemption_failed;
}
break;
case KFD_MQD_TYPE_DIQ:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 3e1a574d4ea6..c1fafc502515 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -237,11 +237,11 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-static uint32_t read_doorbell_id(void *mqd)
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct vi_mqd *m = (struct vi_mqd *)mqd;
- return m->queue_doorbell_id0;
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
@@ -482,7 +482,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
- mqd->read_doorbell_id = read_doorbell_id;
+ mqd->check_preemption_failed = check_preemption_failed;
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 42d40560cd30..a81ef232fdef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1473,7 +1473,7 @@ static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
- return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
+ return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 717a60d7a4ea..58c1fe542193 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
- return ERR_PTR(-EINVAL);
+ process = ERR_PTR(-EINVAL);
+ goto out;
}
/* A prior open of /dev/kfd could have already created the process. */
@@ -1922,6 +1922,8 @@ static int signal_eviction_fence(struct kfd_process *p)
rcu_read_lock();
ef = dma_fence_get_rcu_safe(&p->ef);
rcu_read_unlock();
+ if (!ef)
+ return -EINVAL;
ret = dma_fence_signal(ef);
dma_fence_put(ef);
@@ -1949,10 +1951,9 @@ static void evict_process_worker(struct work_struct *work)
* they are responsible stopping the queues and scheduling
* the restore work.
*/
- if (!signal_eviction_fence(p))
- queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- else
+ if (signal_eviction_fence(p) ||
+ mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
@@ -2011,9 +2012,9 @@ static void restore_process_worker(struct work_struct *work)
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
- ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
- WARN(!ret, "reschedule restore work failed\n");
+ if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+ kfd_process_restore_queues(p);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f0f7f48af413..386875e6eb96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3426,7 +3426,7 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
- return r;
+ return 0;
}
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 92a5c5efcf92..9a5bcafbf730 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -33,6 +33,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dpp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2851719d7121..73cb88121382 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -271,7 +274,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
- u32 v_blank_start, v_blank_end, h_position, v_position;
+ u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
struct amdgpu_crtc *acrtc = NULL;
struct dc *dc = adev->dm.dc;
@@ -845,7 +848,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
*/
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
- struct dmub_notification notify;
+ struct dmub_notification notify = {0};
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_display_manager *dm = &adev->dm;
@@ -1227,6 +1230,15 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
break;
}
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+ break;
+ default:
+ break;
+ }
+
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error initializing DMUB HW: %d\n", status);
@@ -1723,8 +1735,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ else
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- init_data.flags.disable_ips_in_vpb = 1;
+ init_data.flags.disable_ips_in_vpb = 0;
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
@@ -2626,6 +2640,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
int i;
struct dc_stream_state *del_streams[MAX_PIPES];
int del_streams_count = 0;
+ struct dc_commit_streams_params params = {};
memset(del_streams, 0, sizeof(del_streams));
@@ -2652,7 +2667,9 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
- res = dc_commit_streams(dc, context->streams, context->stream_count);
+ params.streams = context->streams;
+ params.stream_count = context->stream_count;
+ res = dc_commit_streams(dc, &params);
fail:
dc_state_release(context);
@@ -2874,6 +2891,7 @@ static int dm_resume(void *handle)
struct dc_state *dc_state;
int i, r, j, ret;
bool need_hotplug = false;
+ struct dc_commit_streams_params commit_params = {};
if (dm->dc->caps.ips_support) {
dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
@@ -2923,7 +2941,9 @@ static int dm_resume(void *handle)
dc_enable_dmub_outbox(adev->dm.dc);
}
- WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+ commit_params.streams = dc_state->streams;
+ commit_params.stream_count = dc_state->stream_count;
+ WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2940,7 +2960,7 @@ static int dm_resume(void *handle)
}
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
- dm_state->context = dc_state_create(dm->dc);
+ dm_state->context = dc_state_create(dm->dc, NULL);
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
/* Before powering on DC we need to re-initialize DMUB. */
@@ -3026,6 +3046,7 @@ static int dm_resume(void *handle)
dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
}
+ dm_new_crtc_state->base.color_mgmt_changed = true;
}
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
@@ -3044,6 +3065,10 @@ static int dm_resume(void *handle)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@@ -3096,6 +3121,8 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.soft_reset = dm_soft_reset,
.set_clockgating_state = dm_set_clockgating_state,
.set_powergating_state = dm_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version dm_ip_block = {
@@ -4820,9 +4847,11 @@ static int dm_init_microcode(struct amdgpu_device *adev)
fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
case IP_VERSION(3, 5, 0):
- case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_35_DMUB;
break;
+ case IP_VERSION(3, 5, 1):
+ fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return 0;
@@ -5700,8 +5729,8 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
- stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
if (!adjust_colour_depth_from_display_info(timing_out, info) &&
drm_mode_is_420_also(info, mode_in) &&
@@ -5921,6 +5950,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
&aconnector->base.probed_modes :
&aconnector->base.modes;
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return NULL;
+
if (aconnector->freesync_vid_base.clock != 0)
return &aconnector->freesync_vid_base;
@@ -6305,27 +6337,22 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
- else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
- stream->signal == SIGNAL_TYPE_EDP) {
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
//
// Decide whether the stream supports VSC SDP colorimetry
// before building the VSC info packet.
//
- stream->use_vsc_sdp_for_colorimetry = false;
- if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- stream->use_vsc_sdp_for_colorimetry =
- aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
- } else {
- if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
- stream->use_vsc_sdp_for_colorimetry = true;
- }
- if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
+ if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
- if (stream->link->psr_settings.psr_feature_enabled)
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
dc_sink_release(sink);
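The colorimetry hunk above collapses the MST and SST special cases into one rule: carry colorimetry in the VSC SDP only when the sink reports DPCD revision 1.4 or newer (raw value 0x14) and the DPRX advertises VSC_SDP_COLORIMETRY_SUPPORTED. A sketch of the predicate, assuming the dpcd_caps layout used in the hunk:

    /* Both conditions must hold before the VSC SDP is used for
     * colorimetry; field names follow the hunk above.
     */
    static bool vsc_sdp_colorimetry_ok(const struct dc_link *link)
    {
        return link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
               link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
    }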
@@ -6792,7 +6819,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
if (!dc_plane_state)
goto cleanup;
- dc_state = dc_state_create(dc);
+ dc_state = dc_state_create(dc, NULL);
if (!dc_state)
goto cleanup;
@@ -7181,7 +7208,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
int i, j, ret;
- int vcpi, pbn_div, pbn, slot_num = 0;
+ int vcpi, pbn_div, pbn = 0, slot_num = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -8394,13 +8421,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
- bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
- bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func;
bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
- bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func;
- bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func;
- bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf;
+ bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func;
+ bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func;
+ bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf;
}
amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
@@ -8613,7 +8640,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.output_csc_transform =
&acrtc_state->stream->csc_color_matrix;
bundle->stream_update.out_transfer_func =
- acrtc_state->stream->out_transfer_func;
+ &acrtc_state->stream->out_transfer_func;
bundle->stream_update.lut3d_func =
(struct dc_3dlut *) acrtc_state->stream->lut3d_func;
bundle->stream_update.func_shaper =
@@ -8764,10 +8791,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
+notify:
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
-notify:
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);
@@ -8847,6 +8874,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
struct drm_connector *connector;
bool mode_set_reset_required = false;
u32 i;
+ struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
@@ -8983,7 +9011,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+ WARN_ON(!dc_commit_streams(dm->dc, &params));
/* Allow idle optimization when vblank count is 0 for display off */
if (dm->active_vblank_irq_count == 0)
@@ -10587,7 +10615,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
- struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0};
trace_amdgpu_dm_atomic_check_begin(state);
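Several hunks in this file add '= {0}' initializers to stack objects (notify, data[36], vars[MAX_PIPES]) so that members a callee only partially fills are never read uninitialized. A generic sketch of the idiom:

    #include <string.h>

    struct notification { int type; int result; char payload[16]; };

    static void example(void)
    {
        struct notification n = {0}; /* all members start at zero */
        int vars[8] = {0};           /* zeroes every element, not just vars[0] */

        /* runtime equivalent when an initializer cannot be used: */
        memset(&n, 0, sizeof(n));
        (void)vars;
    }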
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index c87b64e464ed..ebabfe3a512f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -571,7 +571,7 @@ static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
uint32_t regamma_size, bool has_rom,
enum dc_transfer_func_predefined tf)
{
- struct dc_transfer_func *out_tf = stream->out_transfer_func;
+ struct dc_transfer_func *out_tf = &stream->out_transfer_func;
int ret = 0;
if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
@@ -954,8 +954,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* inverse color ramp in legacy userspace.
*/
crtc->cm_is_degamma_srgb = true;
- stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
/*
* Note: although we pass has_rom as parameter here, we never
* actually use ROM because the color module only takes the ROM
@@ -963,7 +963,7 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* See more in mod_color_calculate_regamma_params()
*/
- r = __set_legacy_tf(stream->out_transfer_func, regamma_lut,
+ r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut,
regamma_size, has_rom);
if (r)
return r;
@@ -1034,7 +1034,7 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
&degamma_size);
ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
- dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
/*
* This case isn't fully correct, but also fairly
@@ -1061,12 +1061,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
* map these to the atomic one instead.
*/
if (crtc->cm_is_degamma_srgb)
- dc_plane_state->in_transfer_func->tf = tf;
+ dc_plane_state->in_transfer_func.tf = tf;
else
- dc_plane_state->in_transfer_func->tf =
+ dc_plane_state->in_transfer_func.tf =
TRANSFER_FUNCTION_LINEAR;
- r = __set_input_tf(caps, dc_plane_state->in_transfer_func,
+ r = __set_input_tf(caps, &dc_plane_state->in_transfer_func,
degamma_lut, degamma_size);
if (r)
return r;
@@ -1075,12 +1075,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
* For legacy gamma support we need the regamma input
* in linear space. Assume that the input is sRGB.
*/
- dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = tf;
+ dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED;
+ dc_plane_state->in_transfer_func.tf = tf;
if (tf != TRANSFER_FUNCTION_SRGB &&
!mod_color_calculate_degamma_params(caps,
- dc_plane_state->in_transfer_func,
+ &dc_plane_state->in_transfer_func,
NULL, false))
return -ENOMEM;
}
@@ -1114,24 +1114,24 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state,
if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT)
return -EINVAL;
- dc_plane_state->in_transfer_func->tf = amdgpu_tf_to_dc_tf(tf);
+ dc_plane_state->in_transfer_func.tf = amdgpu_tf_to_dc_tf(tf);
if (has_degamma_lut) {
ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
- dc_plane_state->in_transfer_func->type =
+ dc_plane_state->in_transfer_func.type =
TF_TYPE_DISTRIBUTED_POINTS;
- ret = __set_input_tf(color_caps, dc_plane_state->in_transfer_func,
+ ret = __set_input_tf(color_caps, &dc_plane_state->in_transfer_func,
degamma_lut, degamma_size);
if (ret)
return ret;
} else {
- dc_plane_state->in_transfer_func->type =
+ dc_plane_state->in_transfer_func.type =
TF_TYPE_PREDEFINED;
if (!mod_color_calculate_degamma_params(color_caps,
- dc_plane_state->in_transfer_func, NULL, false))
+ &dc_plane_state->in_transfer_func, NULL, false))
return -ENOMEM;
}
return 0;
@@ -1156,11 +1156,11 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size);
lut3d_size = lut3d != NULL ? lut3d_size : 0;
- amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, dc_plane_state->lut3d_func);
+ amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false,
amdgpu_tf_to_dc_tf(shaper_tf),
shaper_size,
- dc_plane_state->in_shaper_func);
+ &dc_plane_state->in_shaper_func);
if (ret) {
drm_dbg_kms(plane_state->plane->dev,
"setting plane %d shaper LUT failed.\n",
@@ -1175,7 +1175,7 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
ret = amdgpu_dm_atomic_blend_lut(blend_lut, false,
amdgpu_tf_to_dc_tf(blend_tf),
- blend_size, dc_plane_state->blend_tf);
+ blend_size, &dc_plane_state->blend_tf);
if (ret) {
drm_dbg_kms(plane_state->plane->dev,
"setting plane %d gamma lut failed.\n",
@@ -1221,8 +1221,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
color_caps = &dc_plane_state->ctx->dc->caps.color;
/* Initially, we can just bypass the DGM block. */
- dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+ dc_plane_state->in_transfer_func.type = TF_TYPE_BYPASS;
+ dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR;
/* After, we start to update values according to color props */
has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb);
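The '->' to '.' conversions throughout amdgpu_dm.c and amdgpu_dm_color.c reflect a DC-side refactor: the transfer-function members of dc_stream_state and dc_plane_state become structs embedded by value instead of separately allocated pointers, and call sites that still need a pointer take the address with '&'. A minimal sketch of the refactor, with hypothetical type names:

    struct tf { int type; int tf; };

    /* before: separately allocated, dereferenced via '->' */
    struct stream_old { struct tf *out_transfer_func; };

    /* after: embedded by value; one allocation and one failure path
     * disappear, and '&' recovers a pointer where an API wants one */
    struct stream_new { struct tf out_transfer_func; };

    static void set_defaults(struct stream_new *s)
    {
        struct tf *p = &s->out_transfer_func;

        s->out_transfer_func.type = 0;
        p->tf = 0;
    }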
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index eee4945653e2..fdbeef9720c9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1249,7 +1249,7 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b
size_t size, loff_t *pos)
{
int r;
- uint8_t data[36];
+ uint8_t data[36] = {0};
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dm_crtc_state *acrtc_state;
uint32_t write_size = 36;
@@ -2936,7 +2936,7 @@ static int psr_read_residency(void *data, u64 *val)
{
struct amdgpu_dm_connector *connector = data;
struct dc_link *link = connector->dc_link;
- u32 residency;
+ u32 residency = 0;
link->dc->link_srv->edp_get_psr_residency(link, &residency);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 941e96f100f4..0b03e659fdf3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -791,25 +791,12 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector;
};
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
-{
- u8 link_coding_cap;
- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
-
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
- if (link_coding_cap == DP_128b_132b_ENCODING)
- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
-
- return fec_overhead_multiplier_x1000;
-}
-
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static int kbps_to_peak_pbn(int kbps)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
- peak_kbps *= fec_overhead_multiplier_x1000;
- peak_kbps = div_u64(peak_kbps, 1000 * 1000);
+ peak_kbps = div_u64(peak_kbps, 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
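kbps_to_peak_pbn() now applies only the fixed 0.6 % DisplayPort margin (the x1006/1000 step) before converting to PBN units; the per-encoding FEC multiplier is dropped along with its lookup helper. One PBN granule corresponds to 54/64 MBps of link bandwidth, hence the closing ceil(kbps * 64 / (54 * 8 * 1000)). A standalone sketch of the arithmetic:

    #include <stdint.h>

    static int kbps_to_peak_pbn_sketch(int kbps)
    {
        uint64_t peak_kbps = (uint64_t)kbps * 1006 / 1000; /* +0.6% margin */

        /* divide by the PBN granularity (54*8*1000 kbps per 64 PBN),
         * rounding up */
        return (int)((peak_kbps * 64 + 54 * 8 * 1000 - 1) / (54 * 8 * 1000));
    }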
@@ -910,12 +897,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1011,7 +997,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -1041,7 +1026,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1054,7 +1039,8 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_peak_pbn(
+ params[next_index].bw_range.max_kbps);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1083,7 +1069,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
@@ -1148,7 +1133,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1167,7 +1152,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1175,7 +1160,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1601,7 +1586,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
- int bpp, pbn, branch_max_throughput_mps = 0;
+ int pbn, branch_max_throughput_mps = 0;
struct dc_link_settings cur_link_settings;
unsigned int end_to_end_bw_in_kbps = 0;
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
@@ -1651,11 +1636,34 @@ enum dc_status dm_dp_mst_is_port_support_mode(
}
}
} else {
- /* check if mode could be supported within full_pbn */
- bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
- if (pbn > aconnector->mst_output_port->full_pbn)
+ /* Check if the mode can be supported within the max slot
+ * number of the current MST link and the full_pbn of the MST links.
+ */
+ int pbn_div, slot_num, max_slot_num;
+ enum dc_link_encoding_format link_encoding;
+ uint32_t stream_kbps =
+ dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(stream->link));
+
+ pbn = kbps_to_peak_pbn(stream_kbps);
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+
+ link_encoding = dc_link_get_highest_encoding_format(stream->link);
+ if (link_encoding == DC_LINK_ENCODING_DP_8b_10b)
+ max_slot_num = 63;
+ else if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
+ max_slot_num = 64;
+ else {
+ DRM_DEBUG_DRIVER("Invalid link encoding format\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ if (slot_num > max_slot_num ||
+ pbn > aconnector->mst_output_port->full_pbn) {
+ DRM_DEBUG_DRIVER("Mode can not be supported within mst links!");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
}
/* check if MST DSC output bandwidth exceeds branch_overall_throughput_0_mps */
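The replacement check above validates a mode against both the branch's full_pbn and the time-slot budget of the link: an 8b/10b MST link has 63 usable slots per MTP (one carries the MTP header) while 128b/132b has 64. A sketch of the slot accounting, with the encoding enum abbreviated:

    enum link_encoding { ENC_8B_10B, ENC_128B_132B };

    /* Reject modes needing more slots than one MTP can carry or more
     * PBN than the branch advertises.
     */
    static int mode_fits(int pbn, int pbn_per_slot,
                         enum link_encoding enc, int full_pbn)
    {
        int slots = (pbn + pbn_per_slot - 1) / pbn_per_slot; /* DIV_ROUND_UP */
        int max_slots = (enc == ENC_8B_10B) ? 63 : 64;

        return slots <= max_slots && pbn <= full_pbn;
    }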
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 37c820ab0fdb..fa84d34b7373 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -46,9 +46,6 @@
#define SYNAPTICS_CASCADED_HUB_ID 0x5A
#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
-#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
-#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
-
enum mst_msg_ready_type {
NONE_MSG_RDY_EVENT = 0,
DOWN_REP_MSG_RDY_EVENT = 1,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 1f08c6564c3b..bfa090432ce2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -141,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
* amdgpu_dm_psr_enable() - enable psr f/w
* @stream: stream state
*
- * Return: true if success
*/
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
unsigned int vsync_rate_hz = 0;
@@ -190,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
power_opt |= psr_power_opt_z10_static_screen;
- return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+ dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+
+ if (link->ctx->dc->caps.ips_support)
+ dc_allow_idle_optimizations(link->ctx->dc, true);
}
/*
@@ -210,7 +212,7 @@ bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
}
/*
- * amdgpu_dm_psr_disable() - disable psr f/w
+ * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams
* if psr is enabled on any stream
*
* Return: true if success
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index 6806b3c9c84b..1fdfd183c0d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -32,7 +32,7 @@
#define AMDGPU_DM_PSR_ENTRY_DELAY 5
void amdgpu_dm_set_psr_caps(struct dc_link *link);
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index d9e33c6bccd9..0005f5f8f34f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -52,4 +52,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
func_name, line);
}
+void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
+{
+}
+
+void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
+{
+}
+
/**** power component interfaces ****/
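dm_trace_smu_msg() and dm_trace_smu_delay() are added as empty bodies: DC's clock-manager code now calls the TRACE_SMU_MSG()/TRACE_SMU_DELAY() hooks unconditionally, and each DM platform supplies either a real tracepoint or a no-op, keeping the call sites free of #ifdefs. A sketch of the hook-with-default pattern:

    #include <stdint.h>

    struct dc_context;

    /* platform hook called unconditionally by shared code */
    void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in,
                          struct dc_context *ctx)
    {
        /* no-op on platforms without SMU tracing */
        (void)msg_id; (void)param_in; (void)ctx;
    }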
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 16e72d623630..08c494a7a21b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
-
- return drm_add_modes_noedid(connector, dev->mode_config.max_width,
- dev->mode_config.max_height);
+ /* Maximum resolution supported by DWB */
+ return drm_add_modes_noedid(connector, 3840, 2160);
}
static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 7991ae468f75..4e9fb1742877 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -22,7 +22,7 @@
#
# Makefile for Display Core (dc) component.
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc
+DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp
ifdef CONFIG_DRM_AMD_DC_FP
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 6450853fea94..bc16db69a663 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -44,8 +44,6 @@
#include "bios_parser_common.h"
-#include "dc.h"
-
#define THREE_PERCENT_OF_10000 300
#define LAST_RECORD_TYPE 0xff
@@ -1731,6 +1729,7 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
return 0;
}
+
/**
* get_ss_entry_number_from_internal_ss_info_tbl_V3_1
* Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 05f392501c0a..517c976dbc19 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1594,8 +1594,6 @@ static bool bios_parser_is_device_id_supported(
return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0;
break;
}
-
- return false;
}
static uint32_t bios_parser_get_ss_entry_number(
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 86f9198e7501..2bcae0643e61 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -399,7 +399,7 @@ static enum bp_result transmitter_control_v1_6(
static void init_transmitter_control(struct bios_parser *bp)
{
uint8_t frev;
- uint8_t crev;
+ uint8_t crev = 0;
if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
frev, crev) == false)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index cbae1be7b009..cc000833d300 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -225,7 +225,7 @@ static enum bp_result transmitter_control_fallback(
static void init_transmitter_control(struct bios_parser *bp)
{
uint8_t frev;
- uint8_t crev;
+ uint8_t crev = 0;
BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 9f0f25aee426..a2b4ff2cff16 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -272,7 +272,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
- if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
+ if (ctx->dce_version == DCN_VERSION_2_01) {
dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
@@ -329,15 +329,14 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
case AMDGPU_FAMILY_GC_11_0_0: {
- struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
-
- if (clk_mgr == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
- dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- return &clk_mgr->base;
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+ dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base;
}
case AMDGPU_FAMILY_GC_11_0_1: {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index b77804cfde0f..2a5dd3a296b2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -131,8 +131,8 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
int dprefclk_src_sel;
- int dp_ref_clk_khz;
- int target_div = 600000;
+ int dp_ref_clk_khz = 600000;
+ int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 2a74e2d74909..369421e46c52 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -23,7 +23,6 @@
*
*/
-#include "reg_helper.h"
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "rv1_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 89b79dd39628..19897fa52e7e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -26,7 +26,6 @@
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
-#include <linux/delay.h>
#include "rv1_clk_mgr_vbios_smu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 5ee87965a078..bb4f3bd7532e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -503,7 +503,7 @@ static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- for (i = 0; i < MAX_PIPES * 2; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
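This loop bound fix (repeated below for the rn and dcn30 clock managers) replaces the magic expression MAX_PIPES * 2 with MAX_LINKS, the constant that actually dimensions cur_phyclk_req_table. A sketch of the idea; the MAX_LINKS definition shown is an assumption for illustration:

    #define MAX_PIPES 6
    #define MAX_LINKS (MAX_PIPES * 2) /* assumed definition */

    static unsigned int max_phyclk_req(const unsigned int table[MAX_LINKS])
    {
        unsigned int i, max = 0;

        /* iterate with the constant that sized the array so the loop
         * cannot silently diverge from the table's real length */
        for (i = 0; i < MAX_LINKS; i++)
            if (table[i] > max)
                max = table[i];
        return max;
    }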
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index 9c90090e7351..f77840dd051e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -100,7 +100,15 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.dispclk_khz == 0 ||
dc->debug.force_clock_mode & 0x1) {
+ /* This is from resume or boot up. If the forced_clock cfg option is
+ * used, we bypass programming dispclk and DPPCLK, but still need to
+ * set them for S3.
+ */
+
force_reset = true;
+ /* force_clock_mode 0x1: force a clock reset even if it is the
+ * same clock, as long as it is in Passive level.
+ */
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
}
@@ -150,11 +158,14 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
+ // if clock is being lowered, increase DTO before lowering refclk
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn20_update_clocks_update_dentist(clk_mgr, context);
} else {
+ // if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr, context);
+ // always update dtos unless clock is lowered and not safe to lower
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index e3e1940198a9..5ef0879f6ad9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -548,7 +548,7 @@ static void rn_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_l
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- for (i = 0; i < MAX_PIPES * 2; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
@@ -642,7 +642,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
j = -1;
- ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported FCLK DPM levels exceed maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
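ASSERT() only fires at runtime, and only in debug builds, but the relation between the reported DPM level count and MAX_NUM_DPM_LVL is known at compile time, so these checks (here and in the vg, dcn31 and dcn316 clock managers below) become static_assert() and fail the build instead. A standalone sketch:

    #include <assert.h>

    #define NUM_REPORTED_LEVELS 4
    #define MAX_NUM_DPM_LVL     8

    /* evaluated by the compiler; a violation breaks the build and no
     * code is emitted */
    static_assert(NUM_REPORTED_LEVELS <= MAX_NUM_DPM_LVL,
                  "number of reported DPM levels exceeds maximum");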
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 3271c8c7905d..8083a553c60e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -474,7 +474,7 @@ static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct d
clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- for (i = 0; i < MAX_PIPES * 2; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
}
@@ -560,11 +560,19 @@ void dcn3_clk_mgr_construct(
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+ if (!clk_mgr->base.bw_params) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
+ if (!clk_mgr->wm_range_table) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
}
void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
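dcn3_clk_mgr_construct() (and dcn32 below) previously used bw_params and wm_range_table without checking the allocations; the new code traps into the debugger and bails out on failure. A sketch of the guard, assuming DC's BREAK_TO_DEBUGGER() helper and the clk_mgr_internal layout:

    /* Validate each allocation before first use; returning early leaves
     * the clock manager unconstructed for the caller to detect.
     */
    static void construct_bw_params(struct clk_mgr_internal *clk_mgr)
    {
        clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params),
                                          GFP_KERNEL);
        if (!clk_mgr->base.bw_params) {
            BREAK_TO_DEBUGGER(); /* debug trap on allocation failure */
            return;
        }
    }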
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index bdbf18306698..3253115a153d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/delay.h>
#include "dcn30_clk_mgr_smu_msg.h"
#include "clk_mgr_internal.h"
@@ -54,6 +53,7 @@
*/
static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
+ const uint32_t initial_max_retries = max_retries;
uint32_t reg = 0;
do {
@@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
/* handle DALSMC_Result_CmdRejectedBusy? */
- /* Log? */
+ TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
return reg;
}
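dcn30_smu_wait_for_response() (and the dcn32 variant below) snapshots max_retries before polling so the elapsed wait can be reconstructed afterwards as delay_us * (initial - remaining) and fed to the new TRACE_SMU_DELAY() hook. A standalone sketch of the accounting, with poll/udelay/trace stand-ins:

    #include <stdint.h>

    static uint32_t poll_response(void);      /* stand-in: read reply reg */
    static void udelay(unsigned int us);      /* stand-in: busy wait */
    static void trace_delay(unsigned int us); /* stand-in: TRACE_SMU_DELAY */

    static uint32_t wait_for_response(unsigned int delay_us,
                                      unsigned int max_retries)
    {
        const uint32_t initial_max_retries = max_retries;
        uint32_t reg = 0;

        do {
            reg = poll_response();
            if (reg)
                break;
            udelay(delay_us);
        } while (max_retries--);

        /* retries consumed = initial - remaining */
        trace_delay(delay_us * (initial_max_retries - max_retries));
        return reg;
    }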
@@ -89,6 +89,8 @@ static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
+ TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
+
result = dcn30_smu_wait_for_response(clk_mgr, 10, 200000);
if (IS_SMU_TIMEOUT(result)) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index aa9fd1dc550a..191d8b969d19 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -566,7 +566,8 @@ static void vg_clk_mgr_helper_populate_bw_params(
j = -1;
- ASSERT(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported FCLK DPM levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index ce1386e22576..12a7752758b8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -562,7 +562,8 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
j = -1;
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported pstate levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 6904e95113c1..f201628e4e98 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/delay.h>
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
index 047d19ea919c..78ca1e5c5e9e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
@@ -37,34 +37,34 @@ typedef enum {
} WCK_RATIO_e;
typedef struct {
- uint32_t FClk;
- uint32_t MemClk;
- uint32_t Voltage;
- uint8_t WckRatio;
- uint8_t Spare[3];
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
} DfPstateTable314_t;
//Freq in MHz
//Voltage in milli volts with 2 fractional bits
typedef struct {
- uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
- uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
- uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
- uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
- uint32_t VClocks[NUM_VCN_DPM_LEVELS];
- uint32_t DClocks[NUM_VCN_DPM_LEVELS];
- uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
- DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
- uint8_t NumDcfClkLevelsEnabled;
- uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
- uint8_t NumSocClkLevelsEnabled;
- uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
- uint8_t NumDfPstatesEnabled;
- uint8_t spare[3];
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
- uint32_t MinGfxClk;
- uint32_t MaxGfxClk;
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
} DpmClocks314_t;
struct dcn314_watermarks {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 879f1494c4cd..2d14346b680e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -29,6 +29,7 @@
#include "dm_helpers.h"
#include "dcn315_smu.h"
#include "mp/mp_13_0_5_offset.h"
+#include "logger_types.h"
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
@@ -69,7 +70,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define REG_NBIO(reg_name) \
(NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
-#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 12f3e8aa46d8..20ca7afa9cb4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, context, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, context, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
@@ -480,7 +485,8 @@ static void dcn316_clk_mgr_helper_populate_bw_params(
j = -1;
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
+ static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL,
+ "number of reported pstate levels exceeds maximum");
/* Find lowest DPM, FCLK is filled in reverse order*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index bec252e1dd27..b9e1f3e0b31d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -29,6 +29,7 @@
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
+#include "dcn32/dcn32_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -40,7 +41,6 @@
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
-#include "dcn32/dcn32_clk_mgr.h"
#include "dml/dcn32/dcn32_fpu.h"
#define DCN_BASE__INST0_SEG1 0x000000C0
@@ -829,7 +829,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
- if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) {
+ if (dc->config.enable_auto_dpm_test_logs) {
dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);
}
}
@@ -1199,11 +1199,19 @@ void dcn32_clk_mgr_construct(
clk_mgr->smu_present = false;
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+ if (!clk_mgr->base.bw_params) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
+ if (!clk_mgr->wm_range_table) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
}
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index df244b175fdb..f2f60478b1a6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -49,6 +49,7 @@
*/
static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
+ const uint32_t initial_max_retries = max_retries;
uint32_t reg = 0;
do {
@@ -62,6 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
udelay(delay_us);
} while (max_retries--);
+ TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+
return reg;
}
@@ -79,6 +82,8 @@ static bool dcn32_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
+ TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
+
/* Wait for response */
if (dcn32_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) {
if (param_out)
@@ -115,6 +120,8 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m
*total_delay_us += delay_us;
} while (max_retries--);
+ TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
+
return reg;
}
@@ -135,6 +142,8 @@ static bool dcn32_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mgr
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
+ TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
+
/* Wait for response */
if (dcn32_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) {
if (param_out)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
index c76352a817de..5c44ab0e8667 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
@@ -37,10 +37,9 @@
#define DALSMC_Result_OK 0x1
void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
-void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
-void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
+void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index c378b879c76d..6c9b4e6491a5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -73,6 +73,14 @@
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+#define regCLK5_0_CLK5_spll_field_8 0x464b
+#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+
#define REG(reg_name) \
(ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
@@ -244,7 +252,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
- dcn35_smu_set_dtbclk(clk_mgr, false);
+ if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
+ dcn35_smu_set_dtbclk(clk_mgr, false);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in lower */
@@ -409,11 +418,25 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}
+static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t ssc_enable;
+
+ REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
static void init_clk_states(struct clk_mgr *clk_mgr)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
+ clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
@@ -423,7 +446,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
init_clk_states(clk_mgr);
+
+ // adjust the dp_dto reference clock if SSC is enabled; otherwise use dprefclk
+ if (dcn35_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@@ -512,6 +544,28 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
+static struct dcn35_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
+static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t clock_source;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ // If it's DFS mode, clock_source is 0.
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
+
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
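dcn35_read_ss_info_from_lut() above maps the active clock source (read from CLK2_BYPASS_SEL) to a spread-spectrum percentage through a fixed table, with ss_divider supplying the fixed-point scale (375/1000 = 0.375 %). A standalone sketch of the lookup:

    #include <stdint.h>

    static const uint32_t ss_divider = 1000;
    static const uint32_t ss_percentage[] = {0, 0, 375, 375, 375};

    /* Returns the SS percentage in ss_divider units; 0 means the source
     * (e.g. source 0, plain DFS mode) has no spread.
     */
    static uint32_t ss_for_source(uint32_t clock_source)
    {
        if (clock_source < sizeof(ss_percentage) / sizeof(ss_percentage[0]))
            return ss_percentage[clock_source];
        return 0;
    }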
@@ -709,7 +763,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
clock_table->NumFclkLevelsEnabled;
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
- num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
clock_table->NumDcfClkLevelsEnabled;
for (i = 0; i < num_dcfclk; i++) {
int j;
@@ -836,35 +890,6 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
}
}
-static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr);
-
- if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
- dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val = val & ~DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
- val |= DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
- val = val & ~DMUB_IPS1_ALLOW_MASK;
- val = val & ~DMUB_IPS2_ALLOW_MASK;
- }
-
- if (!allow_idle) {
- val |= DMUB_IPS1_ALLOW_MASK;
- val |= DMUB_IPS2_ALLOW_MASK;
- }
-
- dcn35_smu_write_ips_scratch(clk_mgr, val);
-}
-
static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -884,13 +909,6 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
return ips_supported;
}
-static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-
- return dcn35_smu_read_ips_scratch(clk_mgr);
-}
-
static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
init_clk_states(clk_mgr);
@@ -978,8 +996,6 @@ static struct clk_mgr_funcs dcn35_funcs = {
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
- .set_idle_state = dcn35_set_ips_idle_state,
- .get_idle_state = dcn35_get_ips_idle_state
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
@@ -1056,6 +1072,8 @@ void dcn35_clk_mgr_construct(
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
+ dcn35_read_ss_info_from_lut(&clk_mgr->base);
+
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index 9e588c56c570..1399b41dfd1c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -487,24 +487,3 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
//smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
return retv;
}
-
-void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
-{
- if (!clk_mgr->smu_present)
- return;
-
- REG_WRITE(MP1_SMN_C2PMSG_71, param);
- //smu_print("%s: write_ips_scratch = %x\n", __func__, param);
-}
-
-uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
-{
- uint32_t retv;
-
- if (!clk_mgr->smu_present)
- return 0;
-
- retv = REG_READ(MP1_SMN_C2PMSG_71);
- //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
- return retv;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
index 2b8e6959a03d..06cd3cc6d36e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
@@ -198,6 +198,4 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr);
-void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param);
-uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_35_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e7dc128f6284..eb2aa90b370b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -36,6 +36,7 @@
#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
+#include "dc_plane_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -212,7 +213,8 @@ static bool create_links(
connectors_num,
num_virtual_links);
- for (i = 0; i < connectors_num; i++) {
+	// condition the loop on link_count so invalid connector indices can be skipped
+ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
struct link_init_data link_init_params = {0};
struct dc_link *link;
@@ -386,6 +388,30 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
*perf_trace = NULL;
}
+static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
+{
+ if (!dc || !stream || !adjust)
+ return false;
+
+ if (!dc->current_state)
+ return false;
+
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg) {
+ if (dc->hwss.set_long_vtotal)
+ dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
/**
* dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
* @dc: dc reference
@@ -420,6 +446,15 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
stream->adjust.v_total_min = adjust->v_total_min;
+ stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;
+
+ if (dc->caps.max_v_total != 0 &&
+ (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
+ if (adjust->allow_otg_v_count_halt)
+ return set_long_vtotal(dc, stream, adjust);
+ else
+ return false;
+ }
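For illustration, a hypothetical DM-side caller of the clamp added above; the struct fields match this hunk, while the numeric values and the surrounding variables (dc, stream) are placeholders:

	/* Request a DRR window beyond the OTG limit and opt in to the
	 * halt-based long-vtotal path (values are illustrative only).
	 */
	struct dc_crtc_timing_adjust adjust = {
		.v_total_min = 65000,	/* assumed > dc->caps.max_v_total */
		.v_total_max = 65000,
		.allow_otg_v_count_halt = true,
	};

	/* returns true only if set_long_vtotal() found a matching pipe */
	bool applied = dc_stream_adjust_vmin_vmax(dc, stream, &adjust);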
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1055,8 +1090,7 @@ static bool dc_construct(struct dc *dc,
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
*/
-
- dc->current_state = dc_state_create(dc);
+ dc->current_state = dc_state_create(dc, NULL);
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
@@ -1272,7 +1306,7 @@ static void disable_vbios_mode_if_required(
if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
unsigned int enc_inst, tg_inst = 0;
- unsigned int pix_clk_100hz;
+ unsigned int pix_clk_100hz = 0;
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (enc_inst != ENGINE_ID_UNKNOWN) {
@@ -1759,7 +1793,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
if (dc_is_dp_signal(link->connector_signal)) {
- unsigned int pix_clk_100hz;
+ unsigned int pix_clk_100hz = 0;
uint32_t numOdmPipes = 1;
uint32_t id_src[4] = {0};
@@ -2085,15 +2119,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
return result;
}
-static bool commit_minimal_transition_state_legacy(struct dc *dc,
+static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context);
/**
* dc_commit_streams - Commit current stream state
*
* @dc: DC object with the commit state to be configured in the hardware
- * @streams: Array with a list of stream state
- * @stream_count: Total of streams
+ * @params: Parameters for the commit, including the streams to be committed
*
* Function responsible for commit streams change to the hardware.
*
@@ -2101,9 +2134,7 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc,
* Return DC_OK if everything work as expected, otherwise, return a dc_status
* code.
*/
-enum dc_status dc_commit_streams(struct dc *dc,
- struct dc_stream_state *streams[],
- uint8_t stream_count)
+enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params)
{
int i, j;
struct dc_state *context;
@@ -2112,18 +2143,22 @@ enum dc_status dc_commit_streams(struct dc *dc,
struct pipe_ctx *pipe;
bool handle_exit_odm2to1 = false;
+ if (!params)
+ return DC_ERROR_UNEXPECTED;
+
if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
return res;
- if (!streams_changed(dc, streams, stream_count))
+ if (!streams_changed(dc, params->streams, params->stream_count) &&
+ dc->current_state->power_source == params->power_source)
return res;
dc_exit_ips_for_hw_access(dc);
- DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
+ DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count);
- for (i = 0; i < stream_count; i++) {
- struct dc_stream_state *stream = streams[i];
+ for (i = 0; i < params->stream_count; i++) {
+ struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
dc_stream_log(dc, stream);
@@ -2141,7 +2176,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
* scenario, it uses extra pipes than needed to reduce power consumption
* We need to switch off this feature to make room for new streams.
*/
- if (stream_count > dc->current_state->stream_count &&
+ if (params->stream_count > dc->current_state->stream_count &&
dc->current_state->stream_count == 1) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2151,13 +2186,15 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
if (handle_exit_odm2to1)
- res = commit_minimal_transition_state_legacy(dc, dc->current_state);
+ res = commit_minimal_transition_state(dc, dc->current_state);
context = dc_state_create_current_copy(dc);
if (!context)
goto context_alloc_fail;
- res = dc_validate_with_context(dc, set, stream_count, context, false);
+ context->power_source = params->power_source;
+
+ res = dc_validate_with_context(dc, set, params->stream_count, context, false);
if (res != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
@@ -2165,16 +2202,16 @@ enum dc_status dc_commit_streams(struct dc *dc,
res = dc_commit_state_no_check(dc, context);
- for (i = 0; i < stream_count; i++) {
+ for (i = 0; i < params->stream_count; i++) {
for (j = 0; j < context->stream_count; j++) {
- if (streams[i]->stream_id == context->streams[j]->stream_id)
- streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
+ if (params->streams[i]->stream_id == context->streams[j]->stream_id)
+ params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
- if (dc_is_embedded_signal(streams[i]->signal)) {
- struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
+ if (dc_is_embedded_signal(params->streams[i]->signal)) {
+ struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);
if (dc->hwss.is_abm_supported)
- status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
+ status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
else
status->is_abm_supported = true;
}
@@ -2818,55 +2855,45 @@ static void copy_surface_update_to_plane(
srf_update->plane_info->layer_index;
}
- if (srf_update->gamma &&
- (surface->gamma_correction !=
- srf_update->gamma)) {
- memcpy(&surface->gamma_correction->entries,
+ if (srf_update->gamma) {
+ memcpy(&surface->gamma_correction.entries,
&srf_update->gamma->entries,
sizeof(struct dc_gamma_entries));
- surface->gamma_correction->is_identity =
+ surface->gamma_correction.is_identity =
srf_update->gamma->is_identity;
- surface->gamma_correction->num_entries =
+ surface->gamma_correction.num_entries =
srf_update->gamma->num_entries;
- surface->gamma_correction->type =
+ surface->gamma_correction.type =
srf_update->gamma->type;
}
- if (srf_update->in_transfer_func &&
- (surface->in_transfer_func !=
- srf_update->in_transfer_func)) {
- surface->in_transfer_func->sdr_ref_white_level =
+ if (srf_update->in_transfer_func) {
+ surface->in_transfer_func.sdr_ref_white_level =
srf_update->in_transfer_func->sdr_ref_white_level;
- surface->in_transfer_func->tf =
+ surface->in_transfer_func.tf =
srf_update->in_transfer_func->tf;
- surface->in_transfer_func->type =
+ surface->in_transfer_func.type =
srf_update->in_transfer_func->type;
- memcpy(&surface->in_transfer_func->tf_pts,
+ memcpy(&surface->in_transfer_func.tf_pts,
&srf_update->in_transfer_func->tf_pts,
sizeof(struct dc_transfer_func_distributed_points));
}
- if (srf_update->func_shaper &&
- (surface->in_shaper_func !=
- srf_update->func_shaper))
- memcpy(surface->in_shaper_func, srf_update->func_shaper,
- sizeof(*surface->in_shaper_func));
+ if (srf_update->func_shaper)
+ memcpy(&surface->in_shaper_func, srf_update->func_shaper,
+ sizeof(surface->in_shaper_func));
- if (srf_update->lut3d_func &&
- (surface->lut3d_func !=
- srf_update->lut3d_func))
- memcpy(surface->lut3d_func, srf_update->lut3d_func,
- sizeof(*surface->lut3d_func));
+ if (srf_update->lut3d_func)
+ memcpy(&surface->lut3d_func, srf_update->lut3d_func,
+ sizeof(surface->lut3d_func));
if (srf_update->hdr_mult.value)
surface->hdr_mult =
srf_update->hdr_mult;
- if (srf_update->blend_tf &&
- (surface->blend_tf !=
- srf_update->blend_tf))
- memcpy(surface->blend_tf, srf_update->blend_tf,
- sizeof(*surface->blend_tf));
+ if (srf_update->blend_tf)
+ memcpy(&surface->blend_tf, srf_update->blend_tf,
+ sizeof(surface->blend_tf));
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
@@ -2897,14 +2924,13 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->dst.height && update->dst.width)
stream->dst = update->dst;
- if (update->out_transfer_func &&
- stream->out_transfer_func != update->out_transfer_func) {
- stream->out_transfer_func->sdr_ref_white_level =
+ if (update->out_transfer_func) {
+ stream->out_transfer_func.sdr_ref_white_level =
update->out_transfer_func->sdr_ref_white_level;
- stream->out_transfer_func->tf = update->out_transfer_func->tf;
- stream->out_transfer_func->type =
+ stream->out_transfer_func.tf = update->out_transfer_func->tf;
+ stream->out_transfer_func.type =
update->out_transfer_func->type;
- memcpy(&stream->out_transfer_func->tf_pts,
+ memcpy(&stream->out_transfer_func.tf_pts,
&update->out_transfer_func->tf_pts,
sizeof(struct dc_transfer_func_distributed_points));
}
@@ -3017,14 +3043,8 @@ static void backup_planes_and_stream_state(
for (i = 0; i < status->plane_count; i++) {
scratch->plane_states[i] = *status->plane_states[i];
- scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction;
- scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func;
- scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func;
- scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func;
- scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
}
scratch->stream_state = *stream;
- scratch->out_transfer_func = *stream->out_transfer_func;
}
static void restore_planes_and_stream_state(
@@ -3039,16 +3059,67 @@ static void restore_planes_and_stream_state(
for (i = 0; i < status->plane_count; i++) {
*status->plane_states[i] = scratch->plane_states[i];
- *status->plane_states[i]->gamma_correction = scratch->gamma_correction[i];
- *status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i];
- *status->plane_states[i]->lut3d_func = scratch->lut3d_func[i];
- *status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i];
- *status->plane_states[i]->blend_tf = scratch->blend_tf[i];
}
*stream = scratch->stream_state;
- *stream->out_transfer_func = scratch->out_transfer_func;
}
+/**
+ * update_seamless_boot_flags() - Helper function for updating seamless boot flags
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @surface_count: Number of surfaces that have an update
+ * @stream: Corresponding stream to be updated in the current flip
+ *
+ * Updating seamless boot flags does not need to be part of the commit sequence. This
+ * helper function will update the seamless boot flags on each flip (if required)
+ * outside of the HW commit sequence (fast or slow).
+ *
+ * Return: void
+ */
+static void update_seamless_boot_flags(struct dc *dc,
+ struct dc_state *context,
+ int surface_count,
+ struct dc_stream_state *stream)
+{
+ if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ /* Optimize seamless boot flag keeps clocks and watermarks high until
+ * first flip. After first flip, optimization is required to lower
+ * bandwidth. Important to note that it is expected UEFI will
+ * only light up a single display on POST, therefore we only expect
+ * one stream with seamless boot flag set.
+ */
+ if (stream->apply_seamless_boot_optimization) {
+ stream->apply_seamless_boot_optimization = false;
+
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->optimized_required = true;
+ }
+ }
+}
+
+/**
+ * update_planes_and_stream_state() - The function takes planes and stream
+ * updates as inputs and determines the appropriate update type. If update type
+ * is FULL, the function allocates a new context, populates and validates it.
+ * Otherwise, it updates the current dc context. The function returns both
+ * new_context and new_update_type to the caller. The function also backs
+ * up both current and new contexts into corresponding dc state scratch memory.
+ * TODO: The function does too many things, and even conditionally allocates dc
+ * context memory implicitly. We should consider breaking it down.
+ *
+ * @dc: Current DC state
+ * @srf_updates: an array of surface updates
+ * @surface_count: surface update count
+ * @stream: Corresponding stream to be updated
+ * @stream_update: stream update
+ * @new_update_type: [out] determined update type by the function
+ * @new_context: [out] new context allocated and validated if update type is
+ * FULL, reference to current context if update type is less than FULL.
+ *
+ * Return: true if a valid update is populated into new_context, false
+ * otherwise.
+ */
static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -3072,9 +3143,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
context = dc->current_state;
- backup_planes_and_stream_state(&dc->current_state->scratch, stream);
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.current_state, stream);
/* update current stream with the new updates */
copy_stream_update_to_stream(dc, context, stream, stream_update);
@@ -3143,7 +3215,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
- if (update_type >= UPDATE_TYPE_MED) {
+ if (update_type != UPDATE_TYPE_MED)
+ continue;
+ if (surface->update_flags.bits.clip_size_change ||
+ surface->update_flags.bits.position_change) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -3160,19 +3235,13 @@ static bool update_planes_and_stream_state(struct dc *dc,
BREAK_TO_DEBUGGER();
goto fail;
}
-
- for (i = 0; i < context->stream_count; i++) {
- struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
- context->streams[i]);
-
- if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
- resource_build_test_pattern_params(&context->res_ctx, otg_master);
- }
}
+ update_seamless_boot_flags(dc, context, surface_count, stream);
*new_context = context;
*new_update_type = update_type;
- backup_planes_and_stream_state(&context->scratch, stream);
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.new_state, stream);
return true;
@@ -3261,12 +3330,26 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
if (stream_update->pending_test_pattern) {
- dc_link_dp_set_test_pattern(stream->link,
+ /*
+ * Test pattern params depend on ODM topology
+ * changes that we may be applying to the
+ * front end. At this stage front end changes
+ * are not yet applied, so we can only program
+ * the test pattern in hw based on the current
+ * state and populate the final test pattern
+ * params in the new state. If current and new
+ * test pattern params differ as a result of
+ * different ODM topologies being used, this is
+ * detected and handled during the front end
+ * programming update.
+ */
+ dc->link_srv->dp_set_test_pattern(stream->link,
stream->test_pattern.type,
stream->test_pattern.color_space,
stream->test_pattern.p_link_settings,
stream->test_pattern.p_custom_pattern,
stream->test_pattern.cust_pattern_size);
+ resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
}
if (stream_update->dpms_off) {
@@ -3363,6 +3446,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
if (srf_updates[i].surface->flip_immediate)
continue;
+ update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
sizeof(flip_addr->dirty_rects));
@@ -3479,6 +3563,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
+
dc_exit_ips_for_hw_access(dc);
dc_z10_restore(dc);
@@ -3536,7 +3621,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
context->block_sequence,
&(context->block_sequence_steps),
top_pipe_to_program,
- stream_status);
+ stream_status,
+ context);
hwss_execute_sequence(dc,
context->block_sequence,
context->block_sequence_steps);
@@ -4065,24 +4151,14 @@ struct pipe_split_policy_backup {
bool dynamic_odm_policy;
bool subvp_policy;
enum pipe_split_policy mpc_policy;
+ char force_odm[MAX_PIPES];
};
-static void release_minimal_transition_state(struct dc *dc,
- struct dc_state *context, struct pipe_split_policy_backup *policy)
-{
- dc_state_release(context);
- /* restore previous pipe split and odm policy */
- if (!dc->config.is_vmin_only_asic)
- dc->debug.pipe_split_policy = policy->mpc_policy;
- dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
- dc->debug.force_disable_subvp = policy->subvp_policy;
-}
-
-static struct dc_state *create_minimal_transition_state(struct dc *dc,
- struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
{
- struct dc_state *minimal_transition_context = NULL;
- unsigned int i, j;
+ int i;
if (!dc->config.is_vmin_only_asic) {
policy->mpc_policy = dc->debug.pipe_split_policy;
@@ -4092,97 +4168,257 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
dc->debug.enable_single_display_2to1_odm_policy = false;
policy->subvp_policy = dc->debug.force_disable_subvp;
dc->debug.force_disable_subvp = true;
+ for (i = 0; i < context->stream_count; i++) {
+ policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
+ context->streams[i]->debug.force_odm_combine_segments = 0;
+ }
+}
+
+static void restore_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
+{
+ uint8_t i;
+
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = policy->mpc_policy;
+ dc->debug.enable_single_display_2to1_odm_policy =
+ policy->dynamic_odm_policy;
+ dc->debug.force_disable_subvp = policy->subvp_policy;
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
+}
+
+static void release_minimal_transition_state(struct dc *dc,
+ struct dc_state *minimal_transition_context,
+ struct dc_state *base_context,
+ struct pipe_split_policy_backup *policy)
+{
+ restore_minimal_pipe_split_policy(dc, base_context, policy);
+ dc_state_release(minimal_transition_context);
+}
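The create/release helpers now form a strict pair around an explicit base context; a minimal sketch of the intended pairing, with error handling elided and mts as a placeholder name:

	struct pipe_split_policy_backup policy;
	struct dc_state *mts;

	mts = create_minimal_transition_state(dc, base_context, &policy);
	if (mts) {
		/* ... check seamlessness, commit mts ... */
		release_minimal_transition_state(dc, mts, base_context, &policy);
	}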
+
+static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context)
+{
+ uint8_t i;
+ int j;
+ struct dc_stream_status *stream_status;
+
+ for (i = 0; i < context->stream_count; i++) {
+ stream_status = &context->stream_status[i];
+
+ for (j = 0; j < stream_status->plane_count; j++)
+ stream_status->plane_states[j]->flip_immediate = false;
+ }
+}
+
+static struct dc_state *create_minimal_transition_state(struct dc *dc,
+ struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+{
+ struct dc_state *minimal_transition_context = NULL;
minimal_transition_context = dc_state_create_copy(base_context);
if (!minimal_transition_context)
return NULL;
+ backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
- for (i = 0; i < minimal_transition_context->stream_count; i++) {
- struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];
-
- for (j = 0; j < stream_status->plane_count; j++) {
- struct dc_plane_state *plane_state = stream_status->plane_states[j];
-
- /* force vsync flip when reconfiguring pipes to prevent underflow
- * and corruption
- */
- plane_state->flip_immediate = false;
- }
- }
+ /* prevent underflow and corruption when reconfiguring pipes */
+ force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
- /* this should never happen */
- release_minimal_transition_state(dc, minimal_transition_context, policy);
+ /*
+ * This should never happen; the minimal transition state should
+ * always be validated before pipe split features are added.
+ */
+ release_minimal_transition_state(dc, minimal_transition_context, base_context, policy);
BREAK_TO_DEBUGGER();
minimal_transition_context = NULL;
}
return minimal_transition_context;
}
+static bool is_pipe_topology_transition_seamless_with_intermediate_step(
+ struct dc *dc,
+ struct dc_state *initial_state,
+ struct dc_state *intermediate_state,
+ struct dc_state *final_state)
+{
+ return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
+ intermediate_state) &&
+ dc->hwss.is_pipe_topology_transition_seamless(dc,
+ intermediate_state, final_state);
+}
+
+static void swap_and_release_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+
+ int i;
+ struct dc_state *old = dc->current_state;
+ struct pipe_ctx *pipe_ctx;
+
+	/* Since memory free requires elevated IRQL, an interrupt
+ * request is generated by mem free. If this happens
+ * between freeing and reassigning the context, our vsync
+ * interrupt will call into dc and cause a memory
+ * corruption. Hence, we first reassign the context,
+ * then free the old context.
+ */
+ dc->current_state = new_context;
+ dc_state_release(old);
+
+ // clear any forced full updates
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &new_context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
+}
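The ordering inside swap_and_release_current_context is the entire point of the helper; a comment-only sketch contrasting it with the unsafe order it replaces:

	/* Unsafe: a vsync interrupt landing between the release and the
	 * reassignment can dereference freed memory via dc->current_state.
	 *
	 *	dc_state_release(dc->current_state);
	 *	dc->current_state = new_context;
	 *
	 * Safe (as implemented above): reassign first, then release.
	 *
	 *	struct dc_state *old = dc->current_state;
	 *	dc->current_state = new_context;
	 *	dc_state_release(old);
	 */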
+
+static int initialize_empty_surface_updates(
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates)
+{
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+ int i;
+
+ if (!status)
+ return 0;
+
+ for (i = 0; i < status->plane_count; i++)
+ srf_updates[i].surface = status->plane_states[i];
+
+ return status->plane_count;
+}
+
+static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ bool success = false;
+ struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context =
+ create_minimal_transition_state(dc, new_context,
+ &policy);
+
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = new state\n");
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
+ }
+ release_minimal_transition_state(
+ dc, intermediate_context, new_context, &policy);
+ }
+ return success;
+}
+
+static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+ bool success = false;
+ struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context;
+ struct dc_state *old_current_state = dc->current_state;
+ struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ int surface_count;
+
+ /*
+ * Both current and new contexts share the same stream and plane state
+ * pointers. When the new context is validated, stream and planes get
+ * populated with new updates such as new plane addresses. This makes
+ * the current context no longer valid because stream and planes are
+ * modified from the original. We back up the current stream and plane
+ * states into scratch space whenever we populate a new context, so we
+ * can restore the original values by calling the restore function now.
+ * This brings back the original stream and plane states associated
+ * with the current state.
+ */
+ restore_planes_and_stream_state(&dc->scratch.current_state, stream);
+ dc_state_retain(old_current_state);
+ intermediate_context = create_minimal_transition_state(dc,
+ old_current_state, &policy);
+
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = current state\n");
+ surface_count = initialize_empty_surface_updates(
+ stream, srf_updates);
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
+ }
+ release_minimal_transition_state(dc, intermediate_context,
+ old_current_state, &policy);
+ }
+ dc_state_release(old_current_state);
+ /*
+ * Restore stream and plane states back to the values associated with
+ * new context.
+ */
+ restore_planes_and_stream_state(&dc->scratch.new_state, stream);
+ return success;
+}
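A sketch of the backup/restore choreography described in the comment above, using the two scratch slots this series introduces (an illustrative trace of the call order, not new API):

	/*
	 * update_planes_and_stream_state()             (FULL updates only)
	 *	backup  -> dc->scratch.current_state    pre-update values
	 *	... populate and validate new context ...
	 *	backup  -> dc->scratch.new_state        post-update values
	 *
	 * commit_minimal_transition_based_on_current_context()
	 *	restore <- dc->scratch.current_state    rewind for old-base commit
	 *	... commit intermediate state ...
	 *	restore <- dc->scratch.new_state        reapply new values
	 */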
/**
- * commit_minimal_transition_state - Commit a minimal state based on current or new context
+ * commit_minimal_transition_state_in_dc_update - Commit a minimal state based
+ * on current or new context
*
* @dc: DC structure, used to get the current state
- * @context: New context
+ * @new_context: New context
* @stream: Stream getting the update for the flip
+ * @srf_updates: Surface updates
+ * @surface_count: Number of surfaces
*
- * The function takes in current state and new state and determine a minimal transition state
- * as the intermediate step which could make the transition between current and new states
- * seamless. If found, it will commit the minimal transition state and update current state to
- * this minimal transition state and return true, if not, it will return false.
+ * The function takes in the current state and new state and determines a
+ * minimal transition state as the intermediate step which could make the
+ * transition between current and new states seamless. If found, it will
+ * commit the minimal transition state, update the current state to this
+ * minimal transition state and return true; if not, it will return false.
*
* Return:
* Return True if the minimal transition succeeded, false otherwise
*/
-static bool commit_minimal_transition_state(struct dc *dc,
- struct dc_state *context,
- struct dc_stream_state *stream)
-{
- bool success = false;
- struct dc_state *minimal_transition_context;
- struct pipe_split_policy_backup policy;
-
- /* commit based on new context */
- minimal_transition_context = create_minimal_transition_state(dc,
- context, &policy);
- if (minimal_transition_context) {
- if (dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, minimal_transition_context) &&
- dc->hwss.is_pipe_topology_transition_seamless(
- dc, minimal_transition_context, context)) {
- DC_LOG_DC("%s base = new state\n", __func__);
-
- success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
- }
- release_minimal_transition_state(dc, minimal_transition_context, &policy);
- }
-
- if (!success) {
- /* commit based on current context */
- restore_planes_and_stream_state(&dc->current_state->scratch, stream);
- minimal_transition_context = create_minimal_transition_state(dc,
- dc->current_state, &policy);
- if (minimal_transition_context) {
- if (dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, minimal_transition_context) &&
- dc->hwss.is_pipe_topology_transition_seamless(
- dc, minimal_transition_context, context)) {
- DC_LOG_DC("%s base = current state\n", __func__);
- success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
- }
- release_minimal_transition_state(dc, minimal_transition_context, &policy);
- }
- restore_planes_and_stream_state(&context->scratch, stream);
- }
-
- ASSERT(success);
+static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ bool success = commit_minimal_transition_based_on_new_context(
+ dc, new_context, stream, srf_updates,
+ surface_count);
+ if (!success)
+ success = commit_minimal_transition_based_on_current_context(dc,
+ new_context, stream);
+ if (!success)
+ DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n");
return success;
}
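Taken together, the helper above is a two-rung fallback ladder; a compact restatement of its control flow (no new behavior, names as in this hunk):

	/* 1) try an intermediate state derived from the NEW context;
	 * 2) failing that, one derived from the CURRENT context;
	 * 3) failing both, log and proceed non-seamlessly.
	 */
	bool ok = commit_minimal_transition_based_on_new_context(dc,
			new_context, stream, srf_updates, surface_count) ||
		  commit_minimal_transition_based_on_current_context(dc,
			new_context, stream);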
/**
- * commit_minimal_transition_state_legacy - Create a transition pipe split state
+ * commit_minimal_transition_state - Create a transition pipe split state
*
* @dc: Used to get the current state status
* @transition_base_context: New transition state
@@ -4199,7 +4435,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Return:
* Return false if something is wrong in the transition state.
*/
-static bool commit_minimal_transition_state_legacy(struct dc *dc,
+static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context;
@@ -4260,12 +4496,14 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc,
dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
"Unknown");
+ dc_state_retain(transition_base_context);
transition_context = create_minimal_transition_state(dc,
transition_base_context, &policy);
if (transition_context) {
ret = dc_commit_state_no_check(dc, transition_context);
- release_minimal_transition_state(dc, transition_context, &policy);
+ release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
}
+ dc_state_release(transition_base_context);
if (ret != DC_OK) {
/* this should never happen */
@@ -4283,41 +4521,6 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc,
return true;
}
-/**
- * update_seamless_boot_flags() - Helper function for updating seamless boot flags
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed
- * @surface_count: Number of surfaces that have an updated
- * @stream: Corresponding stream to be updated in the current flip
- *
- * Updating seamless boot flags do not need to be part of the commit sequence. This
- * helper function will update the seamless boot flags on each flip (if required)
- * outside of the HW commit sequence (fast or slow).
- *
- * Return: void
- */
-static void update_seamless_boot_flags(struct dc *dc,
- struct dc_state *context,
- int surface_count,
- struct dc_stream_state *stream)
-{
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
- /* Optimize seamless boot flag keeps clocks and watermarks high until
- * first flip. After first flip, optimization is required to lower
- * bandwidth. Important to note that it is expected UEFI will
- * only light up a single display on POST, therefore we only expect
- * one stream with seamless boot flag set.
- */
- if (stream->apply_seamless_boot_optimization) {
- stream->apply_seamless_boot_optimization = false;
-
- if (get_seamless_boot_stream_count(context) == 0)
- dc->optimized_required = true;
- }
- }
-}
-
static void populate_fast_updates(struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -4437,123 +4640,9 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
-bool dc_update_planes_and_stream(struct dc *dc,
+static bool update_planes_and_stream_v1(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
- struct dc_stream_update *stream_update)
-{
- struct dc_state *context;
- enum surface_update_type update_type;
- int i;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
- /* In cases where MPO and split or ODM are used transitions can
- * cause underflow. Apply stream configuration with minimal pipe
- * split first to avoid unsupported transitions for active pipes.
- */
- bool force_minimal_pipe_splitting = 0;
- bool is_plane_addition = 0;
- bool is_fast_update_only;
-
- dc_exit_ips_for_hw_access(dc);
-
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
- surface_count, stream_update, stream);
- force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
- dc,
- stream,
- srf_updates,
- surface_count,
- &is_plane_addition);
-
- /* on plane addition, minimal state is the current one */
- if (force_minimal_pipe_splitting && is_plane_addition &&
- !commit_minimal_transition_state_legacy(dc, dc->current_state))
- return false;
-
- if (!update_planes_and_stream_state(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- &update_type,
- &context))
- return false;
-
- /* on plane removal, minimal state is the new one */
- if (force_minimal_pipe_splitting && !is_plane_addition) {
- if (!commit_minimal_transition_state_legacy(dc, context)) {
- dc_state_release(context);
- return false;
- }
- update_type = UPDATE_TYPE_FULL;
- }
-
- if (dc->hwss.is_pipe_topology_transition_seamless &&
- !dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, context)) {
- commit_minimal_transition_state(dc,
- context, stream);
- }
- update_seamless_boot_flags(dc, context, surface_count, stream);
- if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
- commit_planes_for_stream_fast(dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- } else {
- if (!stream_update &&
- dc->hwss.is_pipe_topology_transition_seamless &&
- !dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, context)) {
- DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
- BREAK_TO_DEBUGGER();
- }
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- }
-
- if (dc->current_state != context) {
-
- /* Since memory free requires elevated IRQL, an interrupt
- * request is generated by mem free. If this happens
- * between freeing and reassigning the context, our vsync
- * interrupt will call into dc and cause a memory
- * corruption BSOD. Hence, we first reassign the context,
- * then free the old context.
- */
-
- struct dc_state *old = dc->current_state;
-
- dc->current_state = context;
- dc_state_release(old);
-
- // clear any forced full updates
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
- return true;
-}
-
-void dc_commit_updates_for_stream(struct dc *dc,
- struct dc_surface_update *srf_updates,
- int surface_count,
- struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
struct dc_state *state)
{
@@ -4573,35 +4662,13 @@ void dc_commit_updates_for_stream(struct dc *dc,
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
- /* TODO: Since change commit sequence can have a huge impact,
- * we decided to only enable it for DCN3x. However, as soon as
- * we get more confident about this change we'll need to enable
- * the new sequence for all ASICs.
- */
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- if (update_type == UPDATE_TYPE_FAST)
- dc_post_update_surfaces_to_stream(dc);
-
- dc_update_planes_and_stream(dc, srf_updates,
- surface_count, stream,
- stream_update);
- return;
- }
-
- if (update_type >= update_surface_trace_level)
- update_surface_trace(dc, srf_updates, surface_count);
-
-
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
context = dc_state_create_copy(state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
- return;
+ return false;
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4618,7 +4685,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc_post_update_surfaces_to_stream(dc);
}
-
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
@@ -4643,13 +4709,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_state_release(context);
- return;
+ return false;
}
}
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
- update_seamless_boot_flags(dc, context, surface_count, stream);
if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
!dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
@@ -4690,9 +4755,252 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc_post_update_surfaces_to_stream(dc);
TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
}
+ return true;
+}
- return;
+static bool update_planes_and_stream_v2(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ struct dc_state *context;
+ enum surface_update_type update_type;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+ /* In cases where MPO and split or ODM are used transitions can
+ * cause underflow. Apply stream configuration with minimal pipe
+ * split first to avoid unsupported transitions for active pipes.
+ */
+ bool force_minimal_pipe_splitting = 0;
+ bool is_plane_addition = 0;
+ bool is_fast_update_only;
+
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
+ surface_count, stream_update, stream);
+ force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
+ dc,
+ stream,
+ srf_updates,
+ surface_count,
+ &is_plane_addition);
+
+ /* on plane addition, minimal state is the current one */
+ if (force_minimal_pipe_splitting && is_plane_addition &&
+ !commit_minimal_transition_state(dc, dc->current_state))
+ return false;
+
+ if (!update_planes_and_stream_state(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ &update_type,
+ &context))
+ return false;
+
+ /* on plane removal, minimal state is the new one */
+ if (force_minimal_pipe_splitting && !is_plane_addition) {
+ if (!commit_minimal_transition_state(dc, context)) {
+ dc_state_release(context);
+ return false;
+ }
+ update_type = UPDATE_TYPE_FULL;
+ }
+
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context))
+ commit_minimal_transition_state_in_dc_update(dc, context, stream,
+ srf_updates, surface_count);
+
+ if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ } else {
+ if (!stream_update &&
+ dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context)) {
+ DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
+ BREAK_TO_DEBUGGER();
+ }
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ }
+ if (dc->current_state != context)
+ swap_and_release_current_context(dc, context, stream);
+ return true;
+}
+
+static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type)
+{
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
+ ASSERT(update_type < UPDATE_TYPE_FULL);
+ populate_fast_updates(fast_update, srf_updates, surface_count,
+ stream_update);
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count,
+ stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update)
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ dc->current_state);
+ else
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ dc->current_state);
+}
+
+static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *new_context)
+{
+ ASSERT(update_type >= UPDATE_TYPE_FULL);
+ if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
+ dc->current_state, new_context))
+ /*
+ * It is required by the feature design that all pipe topologies
+ * using extra free pipes for power saving purposes such as
+ * dynamic ODM or SubVp shall only be enabled when they can be
+ * transitioned seamlessly to AND from their minimal transition
+ * state. A minimal transition state is defined as the same dc
+ * state but with all power saving features disabled, so it uses
+ * the minimum pipe topology. When we can't seamlessly
+ * transition from state A to state B, we insert the minimal
+ * transition state A' or B' in between so that a seamless
+ * transition between A and B becomes possible.
+ */
+ commit_minimal_transition_state_in_dc_update(dc, new_context,
+ stream, srf_updates, surface_count);
+
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ new_context);
+}
+
+static bool update_planes_and_stream_v3(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ struct dc_state *new_context;
+ enum surface_update_type update_type;
+
+ /*
+ * When this function returns true and new_context is not equal to the
+ * current state, the function allocates and validates a new dc state
+ * and assigns it to new_context. The caller is responsible for freeing
+ * this memory when new_context is no longer used. We swap the current
+ * context with the new one and free the current one instead, so
+ * new_context's memory will live until the next full update, when it
+ * is replaced by a newer context. Refer to the use of
+ * swap_and_release_current_context below.
+ */
+ if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
+ stream, stream_update, &update_type,
+ &new_context))
+ return false;
+
+ if (new_context == dc->current_state) {
+ commit_planes_and_stream_update_on_current_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type);
+ } else {
+ commit_planes_and_stream_update_with_new_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type, new_context);
+ swap_and_release_current_context(dc, new_context, stream);
+ }
+
+ return true;
+}
+
+bool dc_update_planes_and_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ dc_exit_ips_for_hw_access(dc);
+ /*
+ * Update planes and stream version 3 separates FULL and FAST updates
+ * into their own sequences. It aims to clean up frequent checks for
+ * update type that result in unnecessary branching in the logic flow.
+ * It also adds a new commit minimal transition sequence, which detects
+ * the need for a minimal transition based on the actual comparison of
+ * current and new states instead of "predicting" it based on
+ * per-feature software policy, i.e. could_mpcc_tree_change_for_active_pipes.
+ * The new commit minimal transition sequence is made universal to any
+ * power saving features that would use extra free pipes such as
+ * Dynamic ODM/MPC Combine, MPO or SubVp. Therefore there is no longer
+ * a need to specially handle compatibility problems with transitions
+ * among those features as they are now transparent to the new sequence.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51)
+ return update_planes_and_stream_v3(dc, srf_updates,
+ surface_count, stream, stream_update);
+ return update_planes_and_stream_v2(dc, srf_updates,
+ surface_count, stream, stream_update);
+}
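A hypothetical DM-side caller of the rewritten entry point, to show that the v2/v3 dispatch is invisible to callers; everything except dc_update_planes_and_stream itself is a placeholder:

	/* One-surface flip on one stream; the dce_version check inside
	 * dc_update_planes_and_stream picks v2 or v3 transparently.
	 */
	struct dc_surface_update updates[1] = {
		{ .surface = plane_state, .flip_addr = &flip_addrs },
	};

	if (!dc_update_planes_and_stream(dc, updates, 1, stream, NULL))
		DC_LOG_ERROR("plane/stream update rejected\n");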
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_state *state)
+{
+ dc_exit_ips_for_hw_access(dc);
+	/* TODO: Since changing the commit sequence can have a huge impact,
+ * we decided to only enable it for DCN3x. However, as soon as
+ * we get more confident about this change we'll need to enable
+ * the new sequence for all ASICs.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51) {
+ update_planes_and_stream_v3(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ update_planes_and_stream_v2(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ stream_update, state);
}
uint8_t dc_get_current_stream_count(struct dc *dc)
@@ -4735,8 +5043,13 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
void dc_power_down_on_boot(struct dc *dc)
{
if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
- dc->hwss.power_down_on_boot)
+ dc->hwss.power_down_on_boot) {
+
+ if (dc->caps.ips_support)
+ dc_exit_ips_for_hw_access(dc);
+
dc->hwss.power_down_on_boot(dc);
+ }
}
void dc_set_power_state(
@@ -4874,11 +5187,15 @@ bool dc_set_replay_allow_active(struct dc *dc, bool active)
return true;
}
-void dc_allow_idle_optimizations(struct dc *dc, bool allow)
+void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
if (dc->debug.disable_idle_power_optimizations)
return;
+ if (allow != dc->idle_optimizations_allowed)
+ DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
+ dc->idle_optimizations_allowed, allow, caller_name);
+
if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
return;
@@ -4893,10 +5210,10 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
-void dc_exit_ips_for_hw_access(struct dc *dc)
+void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
{
if (dc->caps.ips_support)
- dc_allow_idle_optimizations(dc, false);
+ dc_allow_idle_optimizations_internal(dc, false, caller_name);
}
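The *_internal suffix plus the caller_name parameter suggests the public names become macros that stamp in __func__ for the new IPS trace; a sketch of the presumed wrappers (an assumption about dc.h in this series, not shown in this hunk):

	/* Presumed dc.h wrappers (assumption) */
	#define dc_allow_idle_optimizations(dc, allow) \
		dc_allow_idle_optimizations_internal(dc, allow, __func__)

	#define dc_exit_ips_for_hw_access(dc) \
		dc_exit_ips_for_hw_access_internal(dc, __func__)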
bool dc_dmub_is_ips_idle_state(struct dc *dc)
@@ -5030,10 +5347,13 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
}
dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
-bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr)
{
- if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
+ if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
return true;
return false;
}
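A hypothetical call site after the signature change, passing the plane geometry directly; the plane_size field names are an assumption based on the dc_plane_state layout:

	/* Geometry-based MALL eligibility check (field names assumed) */
	bool fits = dc_is_plane_eligible_for_idle_optimizations(dc,
			plane->plane_size.surface_pitch,
			plane->plane_size.surface_size.height,
			plane->format,
			&cursor_attr);

	if (fits)
		dc_allow_idle_optimizations(dc, true);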
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 9c05b1a07142..5c1d3017aefd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -392,10 +392,10 @@ void get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+ if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
- } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
/* FreeSync 2 ARGB2101010 - set border color to pink */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
@@ -403,10 +403,10 @@ void get_hdr_visual_confirm_color(
is_sdr = true;
break;
case PIXEL_FORMAT_FP16:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+ if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, FP16 - set border color to blue */
color->color_b_cb = color_value;
- } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
/* FreeSync 2 HDR - set border color to green */
color->color_g_y = color_value;
} else
@@ -558,9 +558,10 @@ void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
- int *num_steps,
+ unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
- struct dc_stream_status *stream_status)
+ struct dc_stream_status *stream_status,
+ struct dc_state *context)
{
struct dc_plane_state *plane = pipe_ctx->plane_state;
struct dc_stream_state *stream = pipe_ctx->stream;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ec4bf9432bdb..15819416a2f3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -340,7 +340,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
return res_pool;
}
-void dc_destroy_resource_pool(struct dc *dc)
+void dc_destroy_resource_pool(struct dc *dc)
{
if (dc) {
if (dc->res_pool)
@@ -1457,6 +1457,9 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx,
controller_color_space = convert_dp_to_controller_color_space(
otg_master->stream->test_pattern.color_space);
+ if (controller_test_pattern == CONTROLLER_DP_TEST_PATTERN_VIDEOMODE)
+ return;
+
odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads);
odm_slice_width = h_active / odm_cnt;
@@ -1485,6 +1488,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx);
bool res = false;
+
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Invalid input */
@@ -1496,9 +1500,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
return false;
}
- pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
- pipe_ctx->plane_state->format);
-
/* Timing borders are part of vactive that we are also supposed to skip in addition
* to any stream dst offset. Since dm logic assumes dst is in addressable
* space we need to add the left and top borders to dst offsets temporarily.
@@ -1510,6 +1511,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
/* Calculate H and V active size */
pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width;
pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height;
+ pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
+ pipe_ctx->plane_state->format);
/* depends on h_active */
calculate_recout(pipe_ctx);
@@ -1794,6 +1797,30 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
return free_pipe_idx;
}
+int resource_find_free_pipe_used_as_cur_sec_dpp(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
+{
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe, *cur_pipe;
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ cur_pipe = &cur_res_ctx->pipe_ctx[i];
+ new_pipe = &new_res_ctx->pipe_ctx[i];
+
+ if (resource_is_pipe_type(cur_pipe, DPP_PIPE) &&
+ !resource_is_pipe_type(cur_pipe, OPP_HEAD) &&
+ resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
+ }
+ }
+
+ return free_pipe_idx;
+}
+
int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
@@ -2168,50 +2195,91 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
}
}
-void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
+static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
+ struct pipe_ctx *otg_master, int stream_idx)
{
- struct pipe_ctx *otg_master;
struct pipe_ctx *opp_heads[MAX_PIPES];
struct pipe_ctx *dpp_pipes[MAX_PIPES];
- int stream_idx, slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
+ int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
bool is_primary;
DC_LOGGER_INIT(dc->ctx->logger);
+ slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+ &state->res_ctx, opp_heads);
+ for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+ plane_idx = -1;
+ if (opp_heads[slice_idx]->plane_state) {
+ dpp_count = resource_get_dpp_pipes_for_opp_head(
+ opp_heads[slice_idx],
+ &state->res_ctx,
+ dpp_pipes);
+ for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
+ is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
+ dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
+ if (is_primary)
+ plane_idx++;
+ resource_log_pipe(dc, dpp_pipes[dpp_idx],
+ stream_idx, slice_idx,
+ plane_idx, slice_count,
+ is_primary);
+ }
+ } else {
+ resource_log_pipe(dc, opp_heads[slice_idx],
+ stream_idx, slice_idx, plane_idx,
+ slice_count, true);
+ }
+
+ }
+}
+
+static int resource_stream_to_stream_idx(struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ int i, stream_idx = -1;
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream) {
+ stream_idx = i;
+ break;
+ }
+
+ /* never return negative array index */
+ if (stream_idx == -1) {
+ ASSERT(0);
+ return 0;
+ }
+
+ return stream_idx;
+}
+
+void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
+{
+ struct pipe_ctx *otg_master;
+ int stream_idx, phantom_stream_idx;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
DC_LOG_DC(" pipe topology update");
DC_LOG_DC(" ________________________");
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
+ if (state->streams[stream_idx]->is_phantom)
+ continue;
+
otg_master = resource_get_otg_master_for_stream(
&state->res_ctx, state->streams[stream_idx]);
- if (!otg_master || otg_master->stream_res.tg == NULL) {
- DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
- return;
- }
- slice_count = resource_get_opp_heads_for_otg_master(otg_master,
- &state->res_ctx, opp_heads);
- for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
- plane_idx = -1;
- if (opp_heads[slice_idx]->plane_state) {
- dpp_count = resource_get_dpp_pipes_for_opp_head(
- opp_heads[slice_idx],
- &state->res_ctx,
- dpp_pipes);
- for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
- is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
- dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
- if (is_primary)
- plane_idx++;
- resource_log_pipe(dc, dpp_pipes[dpp_idx],
- stream_idx, slice_idx,
- plane_idx, slice_count,
- is_primary);
- }
- } else {
- resource_log_pipe(dc, opp_heads[slice_idx],
- stream_idx, slice_idx, plane_idx,
- slice_count, true);
- }
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ }
+ if (state->phantom_stream_count > 0) {
+ DC_LOG_DC(" | (phantom pipes) |");
+ for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
+ if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
+ continue;
+ phantom_stream_idx = resource_stream_to_stream_idx(state,
+ state->stream_status[stream_idx].mall_stream_config.paired_stream);
+ otg_master = resource_get_otg_master_for_stream(
+ &state->res_ctx, state->streams[phantom_stream_idx]);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
}
}
DC_LOG_DC(" |________________________|\n");
@@ -2266,6 +2334,9 @@ static bool update_pipe_params_after_odm_slice_count_change(
if (pool->funcs->build_pipe_pix_clk_params)
pool->funcs->build_pipe_pix_clk_params(otg_master);
+
+ resource_build_test_pattern_params(&context->res_ctx, otg_master);
+
return result;
}
@@ -2624,13 +2695,19 @@ bool resource_append_dpp_pipes_for_plane_composition(
struct pipe_ctx *otg_master_pipe,
struct dc_plane_state *plane_state)
{
+ bool success;
if (otg_master_pipe->plane_state == NULL)
- return add_plane_to_opp_head_pipes(otg_master_pipe,
+ success = add_plane_to_opp_head_pipes(otg_master_pipe,
plane_state, new_ctx);
else
- return acquire_secondary_dpp_pipes_and_add_plane(
+ success = acquire_secondary_dpp_pipes_and_add_plane(
otg_master_pipe, plane_state, new_ctx,
cur_ctx, pool);
+ if (success)
+ /* when appending a plane, MPC slice count changes from 0 to 1 */
+ success = update_pipe_params_after_mpc_slice_count_change(
+ plane_state, new_ctx, pool);
+ return success;
}
void resource_remove_dpp_pipes_for_plane_composition(
@@ -2965,7 +3042,7 @@ bool resource_update_pipes_for_plane_with_slice_count(
int i;
int dpp_pipe_count;
int cur_slice_count;
- struct pipe_ctx *dpp_pipes[MAX_PIPES];
+ struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0};
bool result = true;
dpp_pipe_count = resource_get_dpp_pipes_for_plane(plane,
@@ -3117,6 +3194,9 @@ static struct audio *find_first_free_audio(
{
int i, available_audio_count;
+ if (id == ENGINE_ID_UNKNOWN)
+ return NULL;
+
available_audio_count = pool->audio_count;
for (i = 0; i < available_audio_count; i++) {
@@ -3371,11 +3451,31 @@ static bool acquire_otg_master_pipe_for_stream(
* any free pipes already used in current context as this could tear
* down existing ODM/MPC/MPO configuration unnecessarily.
*/
+
+ /*
+ * Try to acquire the same OTG master already in use. This is not
+ * optimal because resetting an enabled OTG master pipe for a new stream
+ * requires an extra frame of wait. However, there are test automation
+ * and eDP assumptions that rely on reusing the same OTG master pipe
+ * during mode change. We have to keep this logic as is for now.
+ */
pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ /*
+ * Try to acquire a pipe not used in current resource context to avoid
+ * pipe swapping.
+ */
if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ /*
+ * If pipe swapping is unavoidable, try to acquire pipe used as
+ * secondary DPP pipe in current state, as we prioritize supporting
+ * more streams over supporting MPO planes.
+ */
+ if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) {
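Read as a whole, acquire_otg_master_pipe_for_stream() is now a four-step priority cascade: each finder returns FREE_PIPE_INDEX_NOT_FOUND until one succeeds. A condensed sketch of the pattern, with the finder names shortened (they are stand-ins for the resource_find_* helpers above, which take cur_ctx/new_ctx/pool):

    /* Priority cascade for picking an OTG master pipe (sketch only). */
    int pipe_idx;

    pipe_idx = find_reusable_otg_master();       /* 1. reuse: keeps test/eDP assumptions intact */
    if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
        pipe_idx = find_pipe_unused_in_cur();    /* 2. avoid pipe swapping */
    if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
        pipe_idx = find_cur_sec_dpp();           /* 3. give up an MPO plane for a stream */
    if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
        pipe_idx = find_any_free_pipe();         /* 4. last resort */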
@@ -3990,7 +4090,7 @@ static void set_avi_info_frame(
}
if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
- stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
hdmi_info.bits.EC0_EC2 = 0;
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
}
@@ -4992,3 +5092,39 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st
return false;
}
+
+void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
+{
+ dml2_options->callbacks.dc = dc;
+ dml2_options->callbacks.build_scaling_params = &resource_build_scaling_params;
+ dml2_options->callbacks.build_test_pattern_params = &resource_build_test_pattern_params;
+ dml2_options->callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
+ dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
+ dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
+ dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
+ dml2_options->callbacks.get_mpc_slice_count = &resource_get_mpc_slice_count;
+ dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
+ dml2_options->callbacks.get_odm_slice_count = &resource_get_odm_slice_count;
+ dml2_options->callbacks.get_opp_head = &resource_get_opp_head;
+ dml2_options->callbacks.get_otg_master_for_stream = &resource_get_otg_master_for_stream;
+ dml2_options->callbacks.get_opp_heads_for_otg_master = &resource_get_opp_heads_for_otg_master;
+ dml2_options->callbacks.get_dpp_pipes_for_plane = &resource_get_dpp_pipes_for_plane;
+ dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
+ dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
+
+ dml2_options->svp_pstate.callbacks.dc = dc;
+ dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+ dml2_options->svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
+ dml2_options->svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
+ dml2_options->svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+ dml2_options->svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+ dml2_options->svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+ dml2_options->svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+ dml2_options->svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+ dml2_options->svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
+ dml2_options->svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+ dml2_options->svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+ dml2_options->svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
+ dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes;
+ dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes;
+}
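A resource pool that adopts this helper only has to fill in its ASIC-specific DML2 knobs afterwards. A hedged sketch of a caller; the use_native_soc_bb_construction field is an assumption taken from dml2_wrapper.h:

    /* Hypothetical DCN resource construct function. */
    resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
    dc->dml2_options.use_native_soc_bb_construction = true; /* assumed ASIC-specific knob */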
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index 5f6392ae31a6..cd6570a1e20e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -61,7 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
/* For HPD/HPD RX, convert dpia port index into link index */
if (notify->type == DMUB_NOTIFICATION_HPD ||
notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
- notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
+ notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
get_link_index_from_dpia_port_index(dc, notify->link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 5cc7f8da209c..76bb05f4d6bf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -188,8 +188,11 @@ static void init_state(struct dc *dc, struct dc_state *state)
}
/* Public dc_state functions */
-struct dc_state *dc_state_create(struct dc *dc)
+struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
+#ifdef CONFIG_DRM_AMD_DC_FP
+ struct dml2_configuration_options *dml2_opt = &dc->dml2_options;
+#endif
struct dc_state *state = kvzalloc(sizeof(struct dc_state),
GFP_KERNEL);
@@ -198,10 +201,16 @@ struct dc_state *dc_state_create(struct dc *dc)
init_state(dc, state);
dc_state_construct(dc, state);
+ state->power_source = params ? params->power_source : DC_POWER_SOURCE_AC;
#ifdef CONFIG_DRM_AMD_DC_FP
- if (dc->debug.using_dml2)
- dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2);
+ if (dc->debug.using_dml2) {
+ dml2_opt->use_clock_dc_limits = false;
+ dml2_create(dc, dml2_opt, &state->bw_ctx.dml2);
+
+ dml2_opt->use_clock_dc_limits = true;
+ dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source);
+ }
#endif
kref_init(&state->refcount);
@@ -214,6 +223,7 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
struct kref refcount = dst_state->refcount;
#ifdef CONFIG_DRM_AMD_DC_FP
struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2;
+ struct dml2_context *dst_dml2_dc_power_source = dst_state->bw_ctx.dml2_dc_power_source;
#endif
dc_state_copy_internal(dst_state, src_state);
@@ -222,6 +232,10 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
dst_state->bw_ctx.dml2 = dst_dml2;
if (src_state->bw_ctx.dml2)
dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
+
+ dst_state->bw_ctx.dml2_dc_power_source = dst_dml2_dc_power_source;
+ if (src_state->bw_ctx.dml2_dc_power_source)
+ dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source);
#endif
/* context refcount should not be overridden */
@@ -245,6 +259,12 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
dc_state_release(new_state);
return NULL;
}
+
+ if (src_state->bw_ctx.dml2_dc_power_source &&
+ !dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source)) {
+ dc_state_release(new_state);
+ return NULL;
+ }
#endif
kref_init(&new_state->refcount);
@@ -310,7 +330,6 @@ void dc_state_destruct(struct dc_state *state)
memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd));
state->dmub_cmd_count = 0;
memset(&state->perf_params, 0, sizeof(state->perf_params));
- memset(&state->scratch, 0, sizeof(state->scratch));
}
void dc_state_retain(struct dc_state *state)
@@ -327,6 +346,9 @@ static void dc_state_free(struct kref *kref)
#ifdef CONFIG_DRM_AMD_DC_FP
dml2_destroy(state->bw_ctx.dml2);
state->bw_ctx.dml2 = 0;
+
+ dml2_destroy(state->bw_ctx.dml2_dc_power_source);
+ state->bw_ctx.dml2_dc_power_source = 0;
#endif
kvfree(state);
@@ -341,7 +363,7 @@ void dc_state_release(struct dc_state *state)
* dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
*/
enum dc_status dc_state_add_stream(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream)
{
@@ -370,7 +392,7 @@ enum dc_status dc_state_add_stream(
* dc_state_remove_stream() - Remove a stream from a dc_state.
*/
enum dc_status dc_state_remove_stream(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream)
{
@@ -436,6 +458,15 @@ bool dc_state_add_plane(
goto out;
}
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+ /* ODM combine could prevent us from supporting more planes, so
+ * we reset the ODM slice count back to 1 once all planes have
+ * been removed; this maximizes the number of planes supported
+ * when new planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
otg_master_pipe = resource_get_otg_master_for_stream(
&state->res_ctx, stream);
if (otg_master_pipe)
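The arithmetic behind that comment is simple: a plane costs one DPP pipe per ODM slice, so shrinking the slice count stretches a fixed pipe budget across more planes. A back-of-envelope illustration, not driver code:

    /* Each plane consumes one DPP pipe per ODM slice. */
    static int max_planes(int free_pipes, int odm_slice_count)
    {
        return free_pipes / odm_slice_count; /* 4 pipes: 2 slices -> 2 planes, 1 slice -> 4 */
    }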
@@ -586,7 +617,7 @@ bool dc_state_add_all_planes_for_stream(
*/
struct dc_stream_status *dc_state_get_stream_status(
struct dc_state *state,
- struct dc_stream_state *stream)
+ const struct dc_stream_state *stream)
{
uint8_t i;
@@ -680,7 +711,7 @@ void dc_state_release_phantom_stream(const struct dc *dc,
dc_stream_release(phantom_stream);
}
-struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane)
{
@@ -716,7 +747,7 @@ void dc_state_release_phantom_plane(const struct dc *dc,
}
/* add phantom streams to context and generate correct meta inside dc_state */
-enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream,
struct dc_stream_state *main_stream)
@@ -742,7 +773,7 @@ enum dc_status dc_state_add_phantom_stream(struct dc *dc,
return res;
}
-enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+enum dc_status dc_state_remove_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream)
{
@@ -836,7 +867,7 @@ bool dc_state_add_all_phantom_planes_for_stream(
}
bool dc_state_remove_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state)
{
int i;
@@ -858,7 +889,7 @@ bool dc_state_remove_phantom_streams_and_planes(
}
void dc_state_release_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state)
{
int i;
@@ -869,3 +900,19 @@ void dc_state_release_phantom_streams_and_planes(
for (i = 0; i < state->phantom_plane_count; i++)
dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
}
+
+struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id)
+{
+ struct dc_stream_state *stream = NULL;
+ int i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i] && state->streams[i]->stream_id == id) {
+ stream = state->streams[i];
+ break;
+ }
+ }
+
+ return stream;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 51a970fcb5d0..5c7e4884cac2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -116,12 +116,7 @@ bool dc_stream_construct(struct dc_stream_state *stream,
update_stream_signal(stream, dc_sink_data);
- stream->out_transfer_func = dc_create_transfer_func();
- if (stream->out_transfer_func == NULL) {
- dc_sink_release(dc_sink_data);
- return false;
- }
- stream->out_transfer_func->type = TF_TYPE_BYPASS;
+ stream->out_transfer_func.type = TF_TYPE_BYPASS;
dc_stream_assign_stream_id(stream);
@@ -131,10 +126,6 @@ bool dc_stream_construct(struct dc_stream_state *stream,
void dc_stream_destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
- if (stream->out_transfer_func != NULL) {
- dc_transfer_func_release(stream->out_transfer_func);
- stream->out_transfer_func = NULL;
- }
}
void dc_stream_assign_stream_id(struct dc_stream_state *stream)
@@ -201,9 +192,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
if (new_stream->sink)
dc_sink_retain(new_stream->sink);
- if (new_stream->out_transfer_func)
- dc_transfer_func_retain(new_stream->out_transfer_func);
-
dc_stream_assign_stream_id(new_stream);
/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
@@ -319,7 +307,7 @@ bool dc_stream_set_cursor_attributes(
program_cursor_attributes(dc, stream, attributes);
/* re-enable idle optimizations if necessary */
- if (reset_idle_optimizations)
+ if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
dc_allow_idle_optimizations(dc, true);
return true;
@@ -394,7 +382,7 @@ bool dc_stream_set_cursor_position(
program_cursor_position(dc, stream, position);
/* re-enable idle optimizations if necessary */
- if (reset_idle_optimizations)
+ if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
dc_allow_idle_optimizations(dc, true);
return true;
@@ -425,7 +413,7 @@ bool dc_stream_add_writeback(struct dc *dc,
dc_exit_ips_for_hw_access(dc);
- wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
+ wb_info->dwb_params.out_transfer_func = &stream->out_transfer_func;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
dwb->dwb_is_drc = false;
@@ -507,7 +495,7 @@ bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
{
- int i = 0, j = 0;
+ unsigned int i, j;
if (stream == NULL) {
dm_error("DC: dc_stream is NULL!\n");
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 19140fb65787..067f6555cfdf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -41,25 +41,15 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta
{
plane_state->ctx = ctx;
- plane_state->gamma_correction = dc_create_gamma();
- if (plane_state->gamma_correction != NULL)
- plane_state->gamma_correction->is_identity = true;
+ plane_state->gamma_correction.is_identity = true;
- plane_state->in_transfer_func = dc_create_transfer_func();
- if (plane_state->in_transfer_func != NULL) {
- plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- }
- plane_state->in_shaper_func = dc_create_transfer_func();
- if (plane_state->in_shaper_func != NULL) {
- plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
- }
+ plane_state->in_transfer_func.type = TF_TYPE_BYPASS;
- plane_state->lut3d_func = dc_create_3dlut_func();
+ plane_state->in_shaper_func.type = TF_TYPE_BYPASS;
- plane_state->blend_tf = dc_create_transfer_func();
- if (plane_state->blend_tf != NULL) {
- plane_state->blend_tf->type = TF_TYPE_BYPASS;
- }
+ plane_state->lut3d_func.state.raw = 0;
+
+ plane_state->blend_tf.type = TF_TYPE_BYPASS;
plane_state->pre_multiplied_alpha = true;
@@ -67,30 +57,27 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta
void dc_plane_destruct(struct dc_plane_state *plane_state)
{
- if (plane_state->gamma_correction != NULL) {
- dc_gamma_release(&plane_state->gamma_correction);
- }
- if (plane_state->in_transfer_func != NULL) {
- dc_transfer_func_release(
- plane_state->in_transfer_func);
- plane_state->in_transfer_func = NULL;
- }
- if (plane_state->in_shaper_func != NULL) {
- dc_transfer_func_release(
- plane_state->in_shaper_func);
- plane_state->in_shaper_func = NULL;
- }
- if (plane_state->lut3d_func != NULL) {
- dc_3dlut_func_release(
- plane_state->lut3d_func);
- plane_state->lut3d_func = NULL;
- }
- if (plane_state->blend_tf != NULL) {
- dc_transfer_func_release(
- plane_state->blend_tf);
- plane_state->blend_tf = NULL;
+ // no more pointers to free within dc_plane_state
+}
+
+
+/* dc_state is passed in separately since it may differ from the current dc state accessible from plane_state, e.g.
+ * if the driver is doing an update from an old context to a new one and the caller wants the pipe mask for the new
+ * context rather than the existing one.
+ */
+uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state)
+{
+ uint8_t pipe_mask = 0;
+ int i;
+
+ for (i = 0; i < plane_state->ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state == plane_state && pipe_ctx->plane_res.hubp)
+ pipe_mask |= 1 << pipe_ctx->plane_res.hubp->inst;
}
+ return pipe_mask;
}
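A hedged usage sketch of the new helper; new_ctx stands in for whatever context the caller is building:

    /* Each set bit in the mask is one hubp instance driving the plane. */
    uint8_t mask = dc_plane_get_pipe_mask(new_ctx, plane_state);
    bool uses_hubp2 = mask & (1 << 2); /* e.g. mask 0b0101 = hubp0 + hubp2 */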
/*******************************************************************************
@@ -103,7 +90,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
/*register_flip_interrupt(surface);*/
}
-struct dc_plane_state *dc_create_plane_state(struct dc *dc)
+struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
{
struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index ee8453bf958f..3c33c3bcbe2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -44,6 +44,8 @@
#include "dml2/dml2_wrapper.h"
+#include "dmub/inc/dmub_cmd.h"
+
struct abm_save_restore;
/* forward declaration */
@@ -51,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.273"
+#define DC_VER "3.2.281"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -219,6 +221,7 @@ struct dc_dmub_caps {
bool mclk_sw;
bool subvp_psr;
bool gecc_enable;
+ uint8_t fams_ver;
};
struct dc_caps {
@@ -306,12 +309,12 @@ struct dc_dcc_setting {
unsigned int max_compressed_blk_size;
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
- //These bitfields to be used starting with DCN
+ //These bitfields to be used starting with DCN 3.0
struct {
- uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case)
- uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN
- uint32_t dcc_256_128_128 : 1; //available starting with DCN
- uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case)
+ uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
+ uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
+ uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
+ uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
} dcc_controls;
};
@@ -435,6 +438,9 @@ struct dc_config {
unsigned int disable_ips;
unsigned int disable_ips_in_vpb;
bool usb4_bw_alloc_support;
+ bool allow_0_dtb_clk;
+ bool use_assr_psp_message;
+ bool support_edp0_on_dp1;
};
enum visual_confirm {
@@ -693,6 +699,8 @@ enum pg_hw_pipe_resources {
PG_MPCC,
PG_OPP,
PG_OPTC,
+ PG_DPSTREAM,
+ PG_HDMISTREAM,
PG_HW_PIPE_RESOURCES_NUM_ELEMENT
};
@@ -987,14 +995,17 @@ struct dc_debug_options {
bool psp_disabled_wa;
unsigned int ips2_eval_delay_us;
unsigned int ips2_entry_delay_us;
+ bool optimize_ips_handshake;
bool disable_dmub_reallow_idle;
bool disable_timeout;
bool disable_extblankadj;
+ bool enable_idle_reg_checks;
unsigned int static_screen_wait_frames;
bool force_chroma_subsampling_1tap;
+ bool disable_422_left_edge_pixel;
+ unsigned int force_cositing;
};
-struct gpu_info_soc_bounding_box_v1_0;
/* Generic structure that can be used to query properties of DC. More fields
* can be added as required.
@@ -1003,76 +1014,6 @@ struct dc_current_properties {
unsigned int cursor_size_limit;
};
-struct dc {
- struct dc_debug_options debug;
- struct dc_versions versions;
- struct dc_caps caps;
- struct dc_cap_funcs cap_funcs;
- struct dc_config config;
- struct dc_bounding_box_overrides bb_overrides;
- struct dc_bug_wa work_arounds;
- struct dc_context *ctx;
- struct dc_phy_addr_space_config vm_pa_config;
-
- uint8_t link_count;
- struct dc_link *links[MAX_PIPES * 2];
- struct link_service *link_srv;
-
- struct dc_state *current_state;
- struct resource_pool *res_pool;
-
- struct clk_mgr *clk_mgr;
-
- /* Display Engine Clock levels */
- struct dm_pp_clock_levels sclk_lvls;
-
- /* Inputs into BW and WM calculations. */
- struct bw_calcs_dceip *bw_dceip;
- struct bw_calcs_vbios *bw_vbios;
- struct dcn_soc_bounding_box *dcn_soc;
- struct dcn_ip_params *dcn_ip;
- struct display_mode_lib dml;
-
- /* HW functions */
- struct hw_sequencer_funcs hwss;
- struct dce_hwseq *hwseq;
-
- /* Require to optimize clocks and bandwidth for added/removed planes */
- bool optimized_required;
- bool wm_optimized_required;
- bool idle_optimizations_allowed;
- bool enable_c20_dtm_b0;
-
- /* Require to maintain clocks and bandwidth for UEFI enabled HW */
-
- /* FBC compressor */
- struct compressor *fbc_compressor;
-
- struct dc_debug_data debug_data;
- struct dpcd_vendor_signature vendor_signature;
-
- const char *build_id;
- struct vm_helper *vm_helper;
-
- uint32_t *dcn_reg_offsets;
- uint32_t *nbio_reg_offsets;
- uint32_t *clk_reg_offsets;
-
- /* Scratch memory */
- struct {
- struct {
- /*
- * For matching clock_limits table in driver with table
- * from PMFW.
- */
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- } update_bw_bounding_box;
- } scratch;
-
- struct dml2_configuration_options dml2_options;
- enum dc_acpi_cm_power_state power_state;
-};
-
enum frame_buffer_mode {
FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
FRAME_BUFFER_MODE_ZFB_ONLY,
@@ -1277,6 +1218,8 @@ union surface_update_flags {
uint32_t raw;
};
+#define DC_REMOVE_PLANE_POINTERS 1
+
struct dc_plane_state {
struct dc_plane_address address;
struct dc_plane_flip_time time;
@@ -1291,8 +1234,8 @@ struct dc_plane_state {
struct dc_plane_dcc_param dcc;
- struct dc_gamma *gamma_correction;
- struct dc_transfer_func *in_transfer_func;
+ struct dc_gamma gamma_correction;
+ struct dc_transfer_func in_transfer_func;
struct dc_bias_and_scale *bias_and_scale;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
@@ -1304,9 +1247,9 @@ struct dc_plane_state {
enum dc_color_space color_space;
- struct dc_3dlut *lut3d_func;
- struct dc_transfer_func *in_shaper_func;
- struct dc_transfer_func *blend_tf;
+ struct dc_3dlut lut3d_func;
+ struct dc_transfer_func in_shaper_func;
+ struct dc_transfer_func blend_tf;
struct dc_transfer_func *gamcor_tf;
enum surface_pixel_format format;
@@ -1342,6 +1285,7 @@ struct dc_plane_state {
struct tg_color visual_confirm_color;
bool is_statically_allocated;
+ enum chroma_cositing cositing;
};
struct dc_plane_info {
@@ -1360,6 +1304,96 @@ struct dc_plane_info {
int global_alpha_value;
bool input_csc_enabled;
int layer_index;
+ enum chroma_cositing cositing;
+};
+
+#include "dc_stream.h"
+
+struct dc_scratch_space {
+ /* Used to temporarily back up the plane states of a stream during
+ * a dc update. Plane states are overwritten with surface updates
+ * during the update; once they are overwritten, the current state
+ * is no longer valid. Storing the current values here lets us
+ * recover a valid current state while the update is in flight.
+ */
+ struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+
+ struct dc_stream_state stream_state;
+};
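A minimal sketch of the backup this scratch space enables, assuming the plane_count/plane_states layout of struct dc_stream_status; the driver does the equivalent inline in its update path rather than through a helper like this:

    static void backup_planes_to_scratch(struct dc *dc,
            const struct dc_stream_status *status)
    {
        int i;

        /* Deep-copy by value: the originals are about to be overwritten. */
        for (i = 0; i < status->plane_count; i++)
            dc->scratch.current_state.plane_states[i] =
                    *status->plane_states[i];
    }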
+
+struct dc {
+ struct dc_debug_options debug;
+ struct dc_versions versions;
+ struct dc_caps caps;
+ struct dc_cap_funcs cap_funcs;
+ struct dc_config config;
+ struct dc_bounding_box_overrides bb_overrides;
+ struct dc_bug_wa work_arounds;
+ struct dc_context *ctx;
+ struct dc_phy_addr_space_config vm_pa_config;
+
+ uint8_t link_count;
+ struct dc_link *links[MAX_LINKS];
+ struct link_service *link_srv;
+
+ struct dc_state *current_state;
+ struct resource_pool *res_pool;
+
+ struct clk_mgr *clk_mgr;
+
+ /* Display Engine Clock levels */
+ struct dm_pp_clock_levels sclk_lvls;
+
+ /* Inputs into BW and WM calculations. */
+ struct bw_calcs_dceip *bw_dceip;
+ struct bw_calcs_vbios *bw_vbios;
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+ struct display_mode_lib dml;
+
+ /* HW functions */
+ struct hw_sequencer_funcs hwss;
+ struct dce_hwseq *hwseq;
+
+ /* Require to optimize clocks and bandwidth for added/removed planes */
+ bool optimized_required;
+ bool wm_optimized_required;
+ bool idle_optimizations_allowed;
+ bool enable_c20_dtm_b0;
+
+ /* Require to maintain clocks and bandwidth for UEFI enabled HW */
+
+ /* FBC compressor */
+ struct compressor *fbc_compressor;
+
+ struct dc_debug_data debug_data;
+ struct dpcd_vendor_signature vendor_signature;
+
+ const char *build_id;
+ struct vm_helper *vm_helper;
+
+ uint32_t *dcn_reg_offsets;
+ uint32_t *nbio_reg_offsets;
+ uint32_t *clk_reg_offsets;
+
+ /* Scratch memory */
+ struct {
+ struct {
+ /*
+ * For matching clock_limits table in driver with table
+ * from PMFW.
+ */
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ } update_bw_bounding_box;
+ struct dc_scratch_space current_state;
+ struct dc_scratch_space new_state;
+ struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ } scratch;
+
+ struct dml2_configuration_options dml2_options;
+ enum dc_acpi_cm_power_state power_state;
+
};
struct dc_scaling_info {
@@ -1476,10 +1510,15 @@ bool dc_acquire_release_mpc_3dlut(
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
-
-enum dc_status dc_commit_streams(struct dc *dc,
- struct dc_stream_state *streams[],
- uint8_t stream_count);
+/*
+ * Set up streams and links associated to drive sinks
+ * The streams parameter is an absolute set of all active streams.
+ *
+ * After this call:
+ * Phy, Encoder, Timing Generator are programmed and enabled.
+ * New streams are enabled with blank stream; no memory read.
+ */
+enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params);
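Callers migrate from the old (streams, stream_count) pair to the params struct defined in dc_types.h later in this diff; a sketch of the new call shape:

    struct dc_commit_streams_params params = {
        .streams = streams,
        .stream_count = stream_count,
        .power_source = DC_POWER_SOURCE_AC, /* or DC_POWER_SOURCE_DC on battery */
    };

    res = dc_commit_streams(dc, &params);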
struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
@@ -2335,11 +2374,17 @@ bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
-bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
- struct dc_cursor_attributes *cursor_attr);
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
+ struct dc_cursor_attributes *cursor_attr);
+
+#define dc_allow_idle_optimizations(dc, allow) dc_allow_idle_optimizations_internal(dc, allow, __func__)
+#define dc_exit_ips_for_hw_access(dc) dc_exit_ips_for_hw_access_internal(dc, __func__)
-void dc_allow_idle_optimizations(struct dc *dc, bool allow);
-void dc_exit_ips_for_hw_access(struct dc *dc);
+void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, const char *caller_name);
+void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name);
bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 6083b1dcf050..2293a92df3be 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -23,6 +23,7 @@
*
*/
+#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
@@ -34,6 +35,7 @@
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
+#include "dc_plane_priv.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -198,6 +200,11 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+ if (!dmub->debug.timeout_occured) {
+ dmub->debug.timeout_occured = true;
+ dmub->debug.timeout_cmd = *cmd_list;
+ dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
+ }
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
return false;
}
@@ -904,12 +911,15 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_diagnostic_data diag_data = {0};
+ uint32_t i;
if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
DC_LOG_ERROR("%s: invalid parameters.", __func__);
return;
}
+ DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);
+
if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
return;
@@ -933,7 +943,8 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]);
DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]);
DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]);
- DC_LOG_DEBUG(" pc : %08x", diag_data.pc);
+ for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
+ DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]);
DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr);
DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr);
DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr);
@@ -1199,8 +1210,23 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
return true;
}
+static int count_active_streams(const struct dc *dc)
+{
+ int i, count = 0;
+
+ for (i = 0; i < dc->current_state->stream_count; ++i) {
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ if (stream && !stream->dpms_off)
+ count += 1;
+ }
+
+ return count;
+}
+
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
+ volatile const struct dmub_shared_state_ips_fw *ips_fw;
struct dc_dmub_srv *dc_dmub_srv;
union dmub_rb_cmd cmd = {0};
@@ -1211,6 +1237,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
return;
dc_dmub_srv = dc->ctx->dmub_srv;
+ ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
memset(&cmd, 0, sizeof(cmd));
cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
@@ -1226,6 +1253,12 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals new_signals;
+ DC_LOG_IPS(
+ "%s wait idle (ips1_commit=%d ips2_commit=%d)",
+ __func__,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
memset(&new_signals, 0, sizeof(new_signals));
@@ -1245,19 +1278,46 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
+ /* TODO: Move this logic out to hwseq */
+ if (count_active_streams(dc) == 0) {
+ /* IPS2 - Display off */
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ } else {
+ /* RCG only */
+ new_signals.bits.allow_pg = 0;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 0;
+ new_signals.bits.allow_z10 = 0;
+ }
}
ips_driver->signals = new_signals;
}
+ DC_LOG_IPS(
+ "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)",
+ __func__,
+ allow_idle,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
/* We also do not perform a wait since DMCUB could enter idle after the notification. */
dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+
+ /* Register access should stop at this point. */
+ if (allow_idle)
+ dc_dmub_srv->needs_idle_wake = true;
}
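The DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF branch above boils down to a two-row policy: all displays off allows the deepest idle states, any display active allows root-clock gating only. Restated in isolation as a sketch, using the same union and bitfields as the hunk:

    static union dmub_shared_state_ips_driver_signals
    pick_idle_signals(int active_streams)
    {
        union dmub_shared_state_ips_driver_signals s = {0};

        if (active_streams == 0) {
            /* IPS2 - display off: allow everything. */
            s.bits.allow_pg = 1;
            s.bits.allow_ips1 = 1;
            s.bits.allow_ips2 = 1;
            s.bits.allow_z10 = 1;
        } else {
            /* RCG only while a display is active. */
            s.bits.allow_ips1 = 1;
        }
        return s;
    }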
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
if (dc->debug.dmcub_emulation)
return;
@@ -1274,40 +1334,113 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
+ rcg_exit_count = ips_fw->rcg_exit_count;
+ ips1_exit_count = ips_fw->ips1_exit_count;
+ ips2_exit_count = ips_fw->ips2_exit_count;
+
ips_driver->signals.all = 0;
- if (prev_driver_signals.bits.allow_ips2) {
- udelay(dc->debug.ips2_eval_delay_us);
+ DC_LOG_IPS(
+ "%s (allow ips1=%d ips2=%d) (commit ips1=%d ips2=%d) (count rcg=%d ips1=%d ips2=%d)",
+ __func__,
+ ips_driver->signals.bits.allow_ips1,
+ ips_driver->signals.bits.allow_ips2,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->rcg_entry_count,
+ ips_fw->ips1_entry_count,
+ ips_fw->ips2_entry_count);
+
+ /* Note: register access has technically not resumed for DCN here, but we
+ * need to message PMFW through our standard register interface.
+ */
+ dc_dmub_srv->needs_idle_wake = false;
+
+ if (prev_driver_signals.bits.allow_ips2 &&
+ (!dc->debug.optimize_ips_handshake ||
+ ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ DC_LOG_IPS(
+ "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
+ if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
+ udelay(dc->debug.ips2_eval_delay_us);
if (ips_fw->signals.bits.ips2_commit) {
+ DC_LOG_IPS(
+ "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ DC_LOG_IPS(
+ "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
// Wait for IPS2 entry upper bound
udelay(dc->debug.ips2_entry_delay_us);
+ DC_LOG_IPS(
+ "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ DC_LOG_IPS(
+ "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
while (ips_fw->signals.bits.ips2_commit)
udelay(1);
+ DC_LOG_IPS(
+ "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
+ DC_LOG_IPS(
+ "resync inbox1 (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1) {
+ DC_LOG_IPS(
+ "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
+ DC_LOG_IPS(
+ "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
}
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
+
+ DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)",
+ __func__,
+ rcg_exit_count,
+ ips1_exit_count,
+ ips2_exit_count);
}
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
@@ -1335,21 +1468,42 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_
if (dc_dmub_srv->idle_allowed == allow_idle)
return;
+ DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);
+
/*
* Entering a low power state requires a driver notification.
* Powering up the hardware requires notifying PMFW and DMCUB.
* Clearing the driver idle allow requires a DMCUB command.
* DMCUB commands require the DMCUB to be powered up and restored.
- *
- * Exit out early to prevent an infinite loop of DMCUB commands
- * triggering exit low power - use software state to track this.
*/
- dc_dmub_srv->idle_allowed = allow_idle;
- if (!allow_idle)
+ if (!allow_idle) {
+ dc_dmub_srv->idle_exit_counter += 1;
+
dc_dmub_srv_exit_low_power_state(dc);
- else
+ /*
+ * Idle is considered fully exited only after the sequence above
+ * fully completes. If we have a race of two threads exiting
+ * at the same time then it's safe to perform the sequence
+ * twice, as long as we're not re-entering.
+ *
+ * Infinite command submission is avoided by using the
+ * dm_execute_dmub_cmd submission instead of the "wake" helpers.
+ */
+ dc_dmub_srv->idle_allowed = false;
+
+ dc_dmub_srv->idle_exit_counter -= 1;
+ if (dc_dmub_srv->idle_exit_counter < 0) {
+ ASSERT(0);
+ dc_dmub_srv->idle_exit_counter = 0;
+ }
+ } else {
+ /* Consider idle as notified prior to the actual submission to
+ * prevent multiple entries. */
+ dc_dmub_srv->idle_allowed = true;
+
dc_dmub_srv_notify_idle(dc, allow_idle);
+ }
}
bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
@@ -1384,7 +1538,8 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
else
result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
- if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
+ if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+ !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
@@ -1433,8 +1588,10 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
- if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
+ if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+ !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 952bfb368886..2c5866211f60 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -35,6 +35,7 @@ struct pipe_ctx;
struct dc_crtc_timing_adjust;
struct dc_crtc_timing;
struct dc_state;
+struct dc_surface_update;
struct dc_reg_helper_state {
bool gather_in_progress;
@@ -51,7 +52,9 @@ struct dc_dmub_srv {
struct dc_context *ctx;
void *dm;
+ int32_t idle_exit_counter;
bool idle_allowed;
+ bool needs_idle_wake;
};
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 1cb7765f593a..519c3df78ee5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -137,8 +137,13 @@ enum dp_link_encoding {
enum dp_test_link_rate {
DP_TEST_LINK_RATE_RBR = 0x06,
+ DP_TEST_LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane
+ DP_TEST_LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane
DP_TEST_LINK_RATE_HBR = 0x0A,
+ DP_TEST_LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2) - 3.24 Gbps/Lane
+ DP_TEST_LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane
DP_TEST_LINK_RATE_HBR2 = 0x14,
+ DP_TEST_LINK_RATE_RATE_8 = 0x19, // Rate_8 - 6.75 Gbps/Lane
DP_TEST_LINK_RATE_HBR3 = 0x1E,
DP_TEST_LINK_RATE_UHBR10 = 0x01,
DP_TEST_LINK_RATE_UHBR20 = 0x02,
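The four added Rate_* codes follow the usual 8b/10b DPCD convention of expressing the per-lane rate in units of 0.27 Gbps, which is where the figures in the comments come from (0x08 * 0.27 = 2.16, 0x19 * 0.27 = 6.75); the UHBR10/UHBR20 codes use a different encoding. A throwaway decoder for the 8b/10b codes only:

    /* Illustrative only; not driver code. Does not apply to UHBR codes. */
    static unsigned int test_rate_to_kbps_per_lane(unsigned char code)
    {
        return code * 270000; /* 0x0C -> 3240000 kbps = 3.24 Gbps (RBR2) */
    }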
@@ -917,16 +922,6 @@ struct dpcd_usb4_dp_tunneling_info {
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
-#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
-#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
-#endif
-#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0
-#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230
-#endif
-#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_263_256
-#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250
-#endif
-
union dp_main_line_channel_coding_cap {
struct {
uint8_t DP_8b_10b_SUPPORTED :1;
@@ -1232,8 +1227,7 @@ union replay_enable_and_configuration {
unsigned char FREESYNC_PANEL_REPLAY_MODE :1;
unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1;
unsigned char STATE_TRANSITION_ERROR_DETECTION :1;
- unsigned char RESERVED0 :1;
- unsigned char RESERVED1 :4;
+ unsigned char RESERVED :5;
} bits;
unsigned char raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index aae2f3a2660d..2ad7f60805f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -738,6 +738,13 @@ enum scanning_type {
SCANNING_TYPE_UNDEFINED
};
+enum chroma_cositing {
+ CHROMA_COSITING_NONE,
+ CHROMA_COSITING_LEFT,
+ CHROMA_COSITING_TOPLEFT,
+ CHROMA_COSITING_COUNT
+};
+
struct dc_crtc_timing_flags {
uint32_t INTERLACE :1;
uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,
@@ -974,6 +981,7 @@ struct dc_crtc_timing_adjust {
uint32_t v_total_max;
uint32_t v_total_mid;
uint32_t v_total_mid_frame_num;
+ uint32_t allow_otg_v_count_halt;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index ef380cae816a..44afcd989224 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -29,7 +29,7 @@
#include "dc.h"
#include "dc_hw_types.h"
-struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state);
void dc_plane_state_retain(struct dc_plane_state *plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
index 9ee184c1df00..ab13335f1d01 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
@@ -30,5 +30,6 @@
void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state);
void dc_plane_destruct(struct dc_plane_state *plane_state);
+uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state);
#endif /* _DC_PLANE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h
index d167fdbfa8a9..caa45db50232 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state.h
@@ -29,7 +29,7 @@
#include "dc.h"
#include "inc/core_status.h"
-struct dc_state *dc_state_create(struct dc *dc);
+struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params);
void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state);
struct dc_state *dc_state_create_copy(struct dc_state *src_state);
void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state);
@@ -39,12 +39,12 @@ void dc_state_destruct(struct dc_state *state);
void dc_state_retain(struct dc_state *state);
void dc_state_release(struct dc_state *state);
-enum dc_status dc_state_add_stream(struct dc *dc,
+enum dc_status dc_state_add_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
enum dc_status dc_state_remove_stream(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
@@ -74,5 +74,5 @@ bool dc_state_add_all_planes_for_stream(
struct dc_stream_status *dc_state_get_stream_status(
struct dc_state *state,
- struct dc_stream_state *stream);
+ const struct dc_stream_state *stream);
#endif /* _DC_STATE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
index c1f44e09a6c1..615086d74d32 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -29,6 +29,8 @@
#include "dc_state.h"
#include "dc_stream.h"
+struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id);
+
/* Get the type of the provided resource (none, phantom, main) based on the provided
* context. If the context is unavailable, determine only if phantom or not.
*/
@@ -45,7 +47,7 @@ struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *
struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *main_stream);
-struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane);
@@ -58,11 +60,11 @@ void dc_state_release_phantom_plane(const struct dc *dc,
struct dc_plane_state *phantom_plane);
/* add/remove phantom stream to context and generate subvp meta data */
-enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream,
struct dc_stream_state *main_stream);
-enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+enum dc_status dc_state_remove_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream);
@@ -92,11 +94,11 @@ bool dc_state_add_all_phantom_planes_for_stream(
struct dc_state *state);
bool dc_state_remove_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state);
void dc_state_release_phantom_streams_and_planes(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state);
#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index ee10941caa59..e5dbbc6089a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -190,7 +190,7 @@ struct dc_stream_state {
PHYSICAL_ADDRESS_LOC dmdata_address;
bool use_dynamic_meta;
- struct dc_transfer_func *out_transfer_func;
+ struct dc_transfer_func out_transfer_func;
struct colorspace_transform gamut_remap_matrix;
struct dc_csc_transform csc_color_matrix;
@@ -428,14 +428,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
/*
- * Set up streams and links associated to drive sinks
- * The streams parameter is an absolute set of all active streams.
- *
- * After this call:
- * Phy, Encoder, Timing Generator are programmed and enabled.
- * New streams are enabled with blank stream; no memory read.
- */
-/*
* Enable stereo when commit_streams is not required,
* for example, frame alternate.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index be2ac5c442a4..0f66d00ef80f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -422,7 +422,7 @@ struct dc_dwb_params {
enum dwb_capture_rate capture_rate; /* controls the frame capture rate */
struct scaling_taps scaler_taps; /* Scaling taps */
enum dwb_subsample_position subsample_position;
- struct dc_transfer_func *out_transfer_func;
+ const struct dc_transfer_func *out_transfer_func;
};
/* audio*/
@@ -1050,6 +1050,8 @@ union replay_error_status {
struct replay_config {
/* Replay feature is supported */
bool replay_supported;
+ /* Replay caps support DPCD & EDID caps*/
+ bool replay_cap_support;
/* Power opt flags that are supported */
unsigned int replay_power_opt_supported;
/* SMU optimization is supported */
@@ -1175,4 +1177,20 @@ enum mall_stream_type {
SUBVP_MAIN, // subvp in use, this stream is main stream
SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
};
+
+enum dc_power_source_type {
+ DC_POWER_SOURCE_AC, // wall power
+ DC_POWER_SOURCE_DC, // battery power
+};
+
+struct dc_state_create_params {
+ enum dc_power_source_type power_source;
+};
+
+struct dc_commit_streams_params {
+ struct dc_stream_state **streams;
+ uint8_t stream_count;
+ enum dc_power_source_type power_source;
+};
+
#endif /* DC_TYPES_H_ */
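Combined with the dc_state_create() change earlier in this diff, the power-source plumbing looks like this from the caller's side (a sketch; error handling elided):

    /* Create a state tied to battery power, presumably so validation can
     * pick the dml2_dc_power_source instance wired up in dc_state_create(). */
    struct dc_state_create_params cp = {
        .power_source = DC_POWER_SOURCE_DC,
    };
    struct dc_state *state = dc_state_create(dc, &cp);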
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 970644b695cd..b5e0289d2fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+ // Apply the SS'ed (spread spectrum) DPREF clock for eDP only.
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
+ && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
+ && encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
unsigned int modulo_hz = 0;
unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
- dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
-
if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
clock_hz = REG_READ(PHASE[inst]);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index a2f48d46d199..ee601a6897a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -22,9 +22,6 @@
* Authors: AMD
*
*/
-
-#include <linux/delay.h>
-
#include "resource.h"
#include "dce_i2c.h"
#include "dce_i2c_hw.h"
@@ -315,9 +312,6 @@ static bool setup_engine(
/* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
- /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
- REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
-
/*set SW requested I2c speed to default, if API calls in it will be override later*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
index f98400efdd9b..e34e445a4013 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -181,6 +181,7 @@ struct dce_mem_input_registers {
SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
index bf1ffc3629c7..3d9be87aae45 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -111,6 +111,7 @@ enum dce110_opp_reg_type {
OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 670d5ab9d998..2b1673d69ea8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -1408,7 +1408,7 @@ void dce110_opp_set_csc_default(
static void program_pwl(struct dce_transform *xfm_dce,
const struct pwl_params *params)
{
- int retval;
+ uint32_t retval;
uint8_t max_tries = 10;
uint8_t counter = 0;
uint32_t i = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index f9d6a181164a..b851fc65f5b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -34,11 +34,7 @@
#include "reg_helper.h"
#include "fixed31_32.h"
-#ifdef _WIN32
-#include "atombios.h"
-#else
#include "atom.h"
-#endif
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index b010814706fe..4f559a025cf0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -244,7 +244,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
uint16_t param = (uint16_t)(panel_inst << 8);
if (is_alpm)
- param |= REPLAY_RESIDENCY_MODE_ALPM;
+ param |= REPLAY_RESIDENCY_FIELD_MODE_ALPM;
if (is_start)
param |= REPLAY_RESIDENCY_ENABLE;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index f0777d61c2cb..c307f040e48f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init
DCE110 = dce110_timing_generator.o \
dce110_compressor.o dce110_opp_regamma_v.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 7e92effec894..683866797709 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init
DCE112 = dce112_compressor.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 1e3ef68a452a..8f508e662748 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,7 +24,7 @@
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init
DCE120 = dce120_timing_generator.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index fee331accc0e..eede83ad91fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 7eefffbdc925..fba189d26652 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init
DCE80 = dce80_timing_generator.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index ae6a131be71b..8dc7938c36d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,9 +24,9 @@
DCN10 = dcn10_ipp.o \
dcn10_hw_sequencer_debug.o \
- dcn10_dpp.o dcn10_opp.o \
+ dcn10_opp.o \
dcn10_hubp.o dcn10_mpc.o \
- dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+ dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index b7e57aa27361..0b49362f71b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -24,7 +24,7 @@
*/
#include "dc.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "dcn10_cm_common.h"
#include "custom_float.h"
@@ -402,6 +402,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
i += increment) {
if (j == hw_points - 1)
break;
+ if (i >= TRANSFER_FUNC_POINTS) {
+ DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
+ i, TRANSFER_FUNC_POINTS);
+ return false;
+ }
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
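
The guard added above stops the resampling loop from reading past the end of the tf_pts arrays when the stride pushes i beyond TRANSFER_FUNC_POINTS. A minimal standalone sketch of the same bounds-checked sampling, with an illustrative table size standing in for the driver's constant:

#include <stdbool.h>
#include <stdio.h>

#define TF_POINTS 1025 /* stand-in for TRANSFER_FUNC_POINTS */

/* Copy every 'stride'-th point of src into dst, refusing to read
 * past the end of the source table. Returns false on overrun. */
static bool resample(const unsigned short *src, unsigned short *dst,
		     unsigned int n_out, unsigned int stride)
{
	for (unsigned int j = 0, i = 0; j < n_out; j++, i += stride) {
		if (i >= TF_POINTS) {
			fprintf(stderr, "index out of bounds: i=%u\n", i);
			return false;
		}
		dst[j] = src[i];
	}
	return true;
}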
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index d51f1ce02874..6dd355a03033 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -130,7 +130,7 @@ bool hubbub1_verify_allow_pstate_change_high(
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
- unsigned int debug_data;
+ unsigned int debug_data = 0;
unsigned int i;
if (forced_pstate_allow) {
@@ -242,7 +242,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -356,7 +356,7 @@ bool hubbub1_program_urgent_watermarks(
bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -501,7 +501,7 @@ bool hubbub1_program_stutter_watermarks(
bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -582,7 +582,7 @@ bool hubbub1_program_pstate_watermarks(
bool hubbub1_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 4201b7627030..d1f9e63944c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -409,7 +409,7 @@ struct dcn10_hubbub {
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
- struct dcn_watermark_set watermarks;
+ union dcn_watermark_set watermarks;
};
void hubbub1_update_dchub(
@@ -423,7 +423,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
bool hubbub1_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
@@ -446,17 +446,17 @@ void hubbub1_construct(struct hubbub *hubbub,
bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 09784222cc03..69119b2fdce2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -692,6 +692,7 @@ struct dcn_hubp_state {
uint32_t primary_meta_addr_hi;
uint32_t uclk_pstate_force;
uint32_t hubp_cntl;
+ uint32_t flip_control;
};
struct dcn10_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 9033b39e0e0c..c51b717e5622 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -392,7 +392,7 @@ static unsigned int dcn10_get_mpcc_states(struct dc *dc, char *pBuf, unsigned in
remaining_buffer -= chars_printed;
pBuf += chars_printed;
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 377f1ba1a81b..4d0eed7598b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1439,7 +1439,6 @@ enum signal_type dcn10_get_dig_mode(
default:
return SIGNAL_TYPE_NONE;
}
- return SIGNAL_TYPE_NONE;
}
void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index d980e6bd6c66..b7a89c39f445 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -167,7 +167,6 @@ struct dcn10_link_enc_registers {
uint32_t DIO_LINKD_CNTL;
uint32_t DIO_LINKE_CNTL;
uint32_t DIO_LINKF_CNTL;
- uint32_t DIG_FIFO_CTRL0;
uint32_t DIO_CLK_CNTL;
uint32_t DIG_BE_CLK_CNTL;
};
@@ -475,9 +474,6 @@ struct dcn10_link_enc_registers {
type HPO_DP_ENC_SEL;\
type HPO_HDMI_ENC_SEL
-#define DCN32_LINK_ENCODER_REG_FIELD_LIST(type) \
- type DIG_FIFO_OUTPUT_PIXEL_MODE
-
#define DCN35_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_BE_ENABLE;\
type DIG_RB_SWITCH_EN;\
@@ -512,7 +508,6 @@ struct dcn10_link_enc_shift {
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
- DCN32_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN35_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
};
@@ -521,7 +516,6 @@ struct dcn10_link_enc_mask {
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
- DCN32_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN35_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 5838a11efd00..71e9288d60ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -168,6 +168,10 @@ static void opp1_set_pixel_encoding(
case PIXEL_ENCODING_RGB:
case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_SUBSAMPLING_MODE, 0,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
@@ -177,7 +181,10 @@ static void opp1_set_pixel_encoding(
FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
- REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 2,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 1);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index 2c0ecfa5a643..c87de68a509e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -79,6 +79,8 @@
OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \
OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \
OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index c429590f1298..1b96972b9d0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -127,7 +127,6 @@ struct dcn10_stream_enc_registers {
uint32_t AFMT_60958_1;
uint32_t AFMT_60958_2;
uint32_t DIG_FE_CNTL;
- uint32_t DIG_FE_CNTL2;
uint32_t DIG_FIFO_STATUS;
uint32_t DP_MSE_RATE_CNTL;
uint32_t DP_MSE_RATE_UPDATE;
@@ -570,7 +569,7 @@ struct dcn10_stream_enc_registers {
type DP_SEC_GSP11_ENABLE;\
type DP_SEC_GSP11_LINE_NUM
-#define SE_REG_FIELD_LIST_DCN3_2(type) \
+#define SE_REG_FIELD_LIST_DCN3_1_COMMON(type) \
type DIG_FIFO_OUTPUT_PIXEL_MODE;\
type DP_PIXEL_PER_CYCLE_PROCESSING_MODE;\
type DIG_SYMCLK_FE_ON;\
@@ -599,7 +598,7 @@ struct dcn10_stream_encoder_shift {
uint8_t HDMI_ACP_SEND;
SE_REG_FIELD_LIST_DCN2_0(uint8_t);
SE_REG_FIELD_LIST_DCN3_0(uint8_t);
- SE_REG_FIELD_LIST_DCN3_2(uint8_t);
+ SE_REG_FIELD_LIST_DCN3_1_COMMON(uint8_t);
SE_REG_FIELD_LIST_DCN3_5_COMMON(uint8_t);
};
@@ -608,7 +607,7 @@ struct dcn10_stream_encoder_mask {
uint32_t HDMI_ACP_SEND;
SE_REG_FIELD_LIST_DCN2_0(uint32_t);
SE_REG_FIELD_LIST_DCN3_0(uint32_t);
- SE_REG_FIELD_LIST_DCN3_2(uint32_t);
+ SE_REG_FIELD_LIST_DCN3_1_COMMON(uint32_t);
SE_REG_FIELD_LIST_DCN3_5_COMMON(uint32_t);
};
@@ -667,9 +666,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc);
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 3dae3943b056..9b6070c99794 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN.
-DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_hubp.o \
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
index f8667be57046..80779e85e2c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
@@ -299,6 +299,17 @@ void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params)
}
}
+
+ if (dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL) {
+ /* Swap double buffered coefficient set */
+ uint32_t wbscl_mode = REG_READ(WBSCL_MODE);
+ bool coef_ram_current = get_reg_field_value_ex(
+ wbscl_mode, dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL_CURRENT,
+ dwbc20->dwbc_shift->WBSCL_COEF_RAM_SEL_CURRENT);
+
+ REG_UPDATE(WBSCL_MODE, WBSCL_COEF_RAM_SEL, !coef_ram_current);
+ }
+
}
static const struct dwbc_funcs dcn20_dwbc_funcs = {
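
For context on the hunk above: the WBSCL coefficient RAM is double buffered, and WBSCL_COEF_RAM_SEL_CURRENT reports which bank the hardware is reading, so writing the opposite value to WBSCL_COEF_RAM_SEL swaps in the freshly written filter. A toy sketch of that bank flip, with plain variables standing in for the register fields:

#include <stdbool.h>

struct coef_ram {
	int taps[2][64];  /* two banks of scaler filter taps */
	bool current;     /* bank the hardware is reading now */
};

/* Program the idle bank, then select it so the hardware switches
 * to the new coefficients. */
static void swap_in_coefs(struct coef_ram *ram, const int *taps, int n)
{
	bool spare = !ram->current;

	for (int i = 0; i < n && i < 64; i++)
		ram->taps[spare][i] = taps[i];
	ram->current = spare; /* the WBSCL_COEF_RAM_SEL write */
}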
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 6eebcb22e317..c6f859871d11 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -570,7 +570,7 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
static bool hubbub2_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 2f6146bf1d32..24a9c45988ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -85,7 +85,7 @@ struct dcn20_hubbub {
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
- struct dcn_watermark_set watermarks;
+ union dcn_watermark_set watermarks;
int num_vmid;
struct dcn20_vmid vmid[16];
unsigned int detile_buf_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 89c3bf0fe0c9..6bba020ad6fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1331,6 +1331,12 @@ void hubp2_read_state(struct hubp *hubp)
SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+ if (REG(DCHUBP_CNTL))
+ s->hubp_cntl = REG_READ(DCHUBP_CNTL);
+
+ if (REG(DCSURF_FLIP_CONTROL))
+ s->flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+
}
static void hubp2_validate_dml_output(struct hubp *hubp,
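
The REG() checks above follow a common DC pattern: register maps differ per ASIC generation, so optional registers are read into the debug state only when the map defines them. A minimal sketch of the same guard, using a zero address as the "not present" marker in place of the driver's REG() lookup:

#include <stdint.h>

struct hubp_regs {
	uint32_t DCHUBP_CNTL;         /* 0 => not present on this ASIC */
	uint32_t DCSURF_FLIP_CONTROL;
};

struct hubp_state {
	uint32_t hubp_cntl;
	uint32_t flip_control;
};

static uint32_t mmio_read(uint32_t addr) { (void)addr; return 0; } /* stub */

/* Snapshot only the registers this generation actually defines. */
static void read_state(const struct hubp_regs *r, struct hubp_state *s)
{
	if (r->DCHUBP_CNTL)
		s->hubp_cntl = mmio_read(r->DCHUBP_CNTL);
	if (r->DCSURF_FLIP_CONTROL)
		s->flip_control = mmio_read(r->DCSURF_FLIP_CONTROL);
}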
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index efa2adf4f83d..8da3084d933f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -147,7 +147,7 @@
uint32_t DCN_CUR1_TTU_CNTL1;\
uint32_t VMID_SETTINGS_0
-
+/*shared with dcn3.x*/
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
uint32_t FLIP_PARAMETERS_3;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index b2b266953d18..c34e04cac9a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -147,7 +147,8 @@
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
- LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh)
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
DPCS_MASK_SH_LIST(mask_sh),\
@@ -231,6 +232,8 @@
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 16b5ff208d14..ea73473b970a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -395,9 +395,12 @@ static void mpc20_program_ogam_pwl(
MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg);
-
}
+ REG_SEQ_SUBMIT();
+ PERF_TRACE();
+ REG_SEQ_WAIT_DONE();
+ PERF_TRACE();
}
static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id,
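
REG_SEQ_SUBMIT()/REG_SEQ_WAIT_DONE() appear to bracket a batch of queued register writes — queue everything, hand the batch off once, then block until it drains — so the move above puts the submit/wait where writes are actually queued (the LUT loop) rather than in the idle assert. A rough sketch of that batch-then-flush shape, with an illustrative queue in place of the driver's sequence machinery:

#include <stddef.h>

struct reg_write { unsigned int addr, val; };

static struct reg_write batch[256];
static size_t batched;

static void seq_queue(unsigned int addr, unsigned int val)
{
	if (batched < 256)
		batch[batched++] = (struct reg_write){ addr, val };
}

/* Submit the whole batch, then wait for it to complete. */
static void seq_submit_and_wait(void)
{
	/* ... hand 'batch' to the offload engine, poll for done ... */
	batched = 0;
}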
@@ -501,11 +504,6 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
ASSERT(!mpc_disabled);
ASSERT(!mpc_idle);
}
-
- REG_SEQ_SUBMIT();
- PERF_TRACE();
- REG_SEQ_WAIT_DONE();
- PERF_TRACE();
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
index 2b0b4f32e13b..3880db59e457 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN.
DCN201 = dcn201_hubbub.o\
- dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \
+ dcn201_mpc.o dcn201_hubp.o dcn201_opp.o \
dcn201_dccg.o dcn201_link_encoder.o
AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
index 037d265431c6..63798132ed95 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
@@ -52,7 +52,7 @@
static bool hubbub201_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -103,5 +103,5 @@ void hubbub201_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
- hubbub->detile_buf_size = 164 * 1024;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
index 35dd4bac242a..cd2bfcc51276 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
@@ -77,6 +77,7 @@ static void hubp201_program_requestor(struct hubp *hubp,
MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ /* no need to program PTE */
REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0,
CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
@@ -99,6 +100,10 @@ static void hubp201_setup(
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
+ /*
+ * OTG is locked when this function is called. Registers are double
+ * buffered, so disabling the requestors is not needed.
+ */
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
hubp201_program_requestor(hubp, rq_regs);
hubp201_program_deadline(hubp, dlg_attr, ttu_attr);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
index 8b95ef251332..be25e8dc0636 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
@@ -30,6 +30,10 @@
#define DPCS_DCN201_MASK_SH_LIST(mask_sh)\
DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD, mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD_EN, mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD, mask_sh),\
+ LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD_EN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\
@@ -44,7 +48,15 @@
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh)
#define DPCS_DCN201_REG_LIST(id) \
- DPCS_DCN2_CMN_REG_LIST(id)
+ DPCS_DCN2_CMN_REG_LIST(id), \
+ SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
+ SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id)
void dcn201_link_encoder_construct(
struct dcn20_link_encoder *enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index aeb0e0d9b70a..2546224b326a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -140,7 +140,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -334,7 +334,7 @@ bool hubbub21_program_urgent_watermarks(
bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -487,7 +487,7 @@ bool hubbub21_program_stutter_watermarks(
bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -573,7 +573,7 @@ bool hubbub21_program_pstate_watermarks(
bool hubbub21_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index d8eb2bb7282c..ab2ce0313529 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -127,22 +127,22 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
bool hubbub21_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index b5b2aa3b3783..c6ca70f3c061 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -25,13 +25,11 @@
DCN30 := dcn30_hubbub.o \
dcn30_hubp.o \
- dcn30_dpp.o \
dcn30_dccg.o \
dcn30_mpc.o dcn30_vpg.o \
dcn30_afmt.o \
dcn30_dio_stream_encoder.o \
dcn30_dwb.o \
- dcn30_dpp_cm.o \
dcn30_dwb_cm.o \
dcn30_cm_common.o \
dcn30_mmhubbub.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
index ddb344056d40..b8327237ed44 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn30_dpp.h"
+#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
#include "dcn30_cm_common.h"
#include "custom_float.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
index 35a613bb08bf..3f1da7f3a91c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
@@ -29,15 +29,9 @@
#include "dcn20/dcn20_dccg.h"
-#define DCCG_REG_LIST_DCN3AG() \
- DCCG_COMMON_REG_LIST_DCN_BASE(),\
- SR(PHYASYMCLK_CLOCK_CNTL),\
- SR(PHYBSYMCLK_CLOCK_CNTL),\
- SR(PHYCSYMCLK_CLOCK_CNTL)
-
-
#define DCCG_REG_LIST_DCN30() \
DCCG_REG_LIST_DCN2(),\
+ DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 4),\
@@ -46,19 +40,10 @@
SR(PHYBSYMCLK_CLOCK_CNTL),\
SR(PHYCSYMCLK_CLOCK_CNTL)
-#define DCCG_MASK_SH_LIST_DCN3AG(mask_sh) \
- DCCG_MASK_SH_LIST_DCN2_1(mask_sh),\
- DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
- DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh)
-
#define DCCG_MASK_SH_LIST_DCN3(mask_sh) \
DCCG_MASK_SH_LIST_DCN2(mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 1fb8fd7afc95..b8e31b5ea114 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -30,8 +30,6 @@
#include "dcn30_dio_link_encoder.h"
#include "stream_encoder.h"
#include "dc_bios_types.h"
-/* #include "dcn3ag/dcn3ag_phy_fw.h" */
-
#include "gpio_service_interface.h"
#define CTX \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
index f2d90f2b8bf1..5b6177c2ae98 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
@@ -55,7 +55,8 @@
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
#define LINK_ENCODER_MASK_SH_LIST_DCN30(mask_sh) \
- LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
+ LE_SF(DIG0_TMDS_DCBALANCER_CONTROL, TMDS_SYNC_DCBAL_EN, mask_sh)
#define DPCS_DCN3_MASK_SH_LIST(mask_sh)\
DPCS_DCN2_MASK_SH_LIST(mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 005dbe099a7a..425b830b88d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -29,9 +29,6 @@
#include "reg_helper.h"
#include "hw_shared.h"
#include "dc.h"
-#include "core_types.h"
-#include <linux/delay.h>
-
#define DC_LOGGER \
enc1->base.ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
index 1b9d9495f76d..fae98cf52020 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -251,9 +251,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = {
.set_fc_enable = dwb3_set_fc_enable,
.set_stereo = dwb3_set_stereo,
.set_new_content = dwb3_set_new_content,
- .dwb_program_output_csc = NULL,
.dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename
- .dwb_set_scaler = NULL,
};
void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
index 332634b76aac..0f3f7c5fbaec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
@@ -217,6 +217,7 @@
SF_DWB2(DWB_OGAM_LUT_DATA, DWBCP, 0, DWB_OGAM_LUT_DATA, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_COLOR_SEL, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_DBG, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_HOST_SEL, mask_sh),\
SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_CONFIG_MODE, mask_sh),\
SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\
@@ -524,6 +525,7 @@
type DWB_OGAM_LUT_DATA;\
type DWB_OGAM_LUT_WRITE_COLOR_MASK;\
type DWB_OGAM_LUT_READ_COLOR_SEL;\
+ type DWB_OGAM_LUT_READ_DBG;\
type DWB_OGAM_LUT_HOST_SEL;\
type DWB_OGAM_LUT_CONFIG_MODE;\
type DWB_OGAM_LUT_STATUS;\
@@ -710,7 +712,7 @@
type DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET;\
type DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS;\
type DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET;\
- type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS;
+ type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS
struct dcn30_dwbc_registers {
/* DCN3AG */
@@ -733,6 +735,10 @@ struct dcn30_dwbc_registers {
uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT;
uint32_t DWB_HOST_READ_CONTROL;
uint32_t DWB_SOFT_RESET;
+ uint32_t DWB_DEBUG_CTRL;
+ uint32_t DWB_DEBUG;
+ uint32_t DWB_TEST_DEBUG_INDEX;
+ uint32_t DWB_TEST_DEBUG_DATA;
/* DWBSCL */
uint32_t DWBSCL_COEF_RAM_TAP_SELECT;
@@ -747,6 +753,9 @@ struct dcn30_dwbc_registers {
uint32_t DWBSCL_DEST_SIZE;
uint32_t DWBSCL_OVERFLOW_STATUS;
uint32_t DWBSCL_OVERFLOW_COUNTER;
+ uint32_t DWBSCL_DEBUG;
+ uint32_t DWBSCL_TEST_DEBUG_INDEX;
+ uint32_t DWBSCL_TEST_DEBUG_DATA;
/* DWBCP */
uint32_t DWB_HDR_MULT_COEF;
@@ -838,6 +847,9 @@ struct dcn30_dwbc_registers {
uint32_t DWB_OGAM_RAMB_REGION_28_29;
uint32_t DWB_OGAM_RAMB_REGION_30_31;
uint32_t DWB_OGAM_RAMB_REGION_32_33;
+ uint32_t DWBCP_DEBUG;
+ uint32_t DWBCP_TEST_DEBUG_INDEX;
+ uint32_t DWBCP_TEST_DEBUG_DATA;
};
/* Internal enums / structs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
index 152c9c5733f1..6a5af3da4b45 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
@@ -95,7 +95,7 @@ int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub,
bool hubbub3_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
index 7b597908b937..ca6233e8f1f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
@@ -124,7 +124,7 @@ bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
bool hubbub3_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 75547ce86c09..60a64d290352 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -455,6 +455,9 @@ void hubp3_read_state(struct hubp *hubp)
if (REG(DCHUBP_CNTL))
s->hubp_cntl = REG_READ(DCHUBP_CNTL);
+ if (REG(DCSURF_FLIP_CONTROL))
+ s->flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+
}
void hubp3_setup(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index bf3386cd444d..fca94e50ae93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -44,6 +44,36 @@
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+void mpc3_mpc_init(struct mpc *mpc)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ int opp_id;
+
+ mpc1_mpc_init(mpc);
+
+ for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+ if (REG(MUX[opp_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+ }
+}
+
+void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ mpc1_mpc_init_single_inst(mpc, mpcc_id);
+
+ /* assuming mpc out mux is connected to opp with the same index at this
+ * point in time (e.g. transitioning from vbios to driver)
+ */
+ if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+}
+
bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id)
@@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux(
MPC_DWB0_MUX, 0xf);
}
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control)
-{
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_RATE_CONTROL_DISABLE, !enable,
- MPC_OUT_RATE_CONTROL, rate_2x_mode);
-
- if (flow_control)
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
- MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
-}
-
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1172,7 +1183,7 @@ void mpc3_get_gamut_remap(struct mpc *mpc,
struct mpc_grph_gamut_adjustment *adjust)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
- uint16_t arr_reg_val[12];
+ uint16_t arr_reg_val[12] = {0};
int select;
read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select);
@@ -1490,8 +1501,8 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
- .mpc_init = mpc1_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init = mpc3_mpc_init,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1508,7 +1519,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc3_program_shaper,
.acquire_rmu = mpcc3_acquire_rmu,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index 9cb96ae95a2f..ce93003dae01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
int num_mpcc,
int num_rmu);
+void mpc3_mpc_init(
+ struct mpc *mpc);
+
+void mpc3_mpc_init_single_inst(
+ struct mpc *mpc,
+ unsigned int mpcc_id);
+
bool mpc3_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
@@ -1078,13 +1085,6 @@ bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id);
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control);
-
void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
index ed9a5549c389..466ba20b9c61 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
@@ -26,6 +26,7 @@
#ifndef __DAL_DCN30_VPG_H__
#define __DAL_DCN30_VPG_H__
+#include "vpg.h"
#define DCN30_VPG_FROM_VPG(vpg)\
container_of(vpg, struct dcn30_vpg, base)
@@ -132,28 +133,6 @@ struct dcn30_vpg_mask {
VPG_DCN3_REG_FIELD_LIST(uint32_t);
};
-struct vpg;
-
-struct vpg_funcs {
- void (*update_generic_info_packet)(
- struct vpg *vpg,
- uint32_t packet_index,
- const struct dc_info_packet *info_packet,
- bool immediate_update);
-
- void (*vpg_poweron)(
- struct vpg *vpg);
-
- void (*vpg_powerdown)(
- struct vpg *vpg);
-};
-
-struct vpg {
- const struct vpg_funcs *funcs;
- struct dc_context *ctx;
- int inst;
-};
-
struct dcn30_vpg {
struct vpg base;
const struct dcn30_vpg_registers *regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h
index 73db962dbc03..067e49cb238e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h
@@ -56,10 +56,4 @@ struct dccg *dccg301_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask);
-struct dccg *dccg301_create(
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *dccg_shift,
- const struct dccg_mask *dccg_mask);
-
#endif //__DCN301_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
index a046664e2031..c1959672df50 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
@@ -63,6 +63,7 @@ static const struct hubbub_funcs hubbub301_funcs = {
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
+ .init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
index e3caaacf7493..e3be0bab4007 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -34,12 +34,14 @@
DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
+ DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\
SR(PHYASYMCLK_CLOCK_CNTL),\
SR(PHYBSYMCLK_CLOCK_CNTL),\
SR(PHYCSYMCLK_CLOCK_CNTL),\
SR(PHYDSYMCLK_CLOCK_CNTL),\
SR(PHYESYMCLK_CLOCK_CNTL),\
SR(DPSTREAMCLK_CNTL),\
+ SR(HDMISTREAMCLK_CNTL),\
SR(SYMCLK32_SE_CNTL),\
SR(SYMCLK32_LE_CNTL),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
@@ -78,6 +80,8 @@
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
+ DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
@@ -92,6 +96,8 @@
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE1_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE2_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE3_EN, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 26be5fee7411..b2cea59ba5d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -205,7 +205,7 @@ void dcn31_link_encoder_set_dio_phy_mux(
}
}
-static void enc31_hw_init(struct link_encoder *enc)
+void enc31_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
index 221671563a0b..ee78ba80797c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
@@ -89,6 +89,7 @@
SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH), \
SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
@@ -222,6 +223,7 @@
SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH), \
SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
@@ -283,4 +285,6 @@ bool dcn31_link_encoder_is_in_alt_mode(
void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings);
+void enc31_hw_init(struct link_encoder *enc);
+
#endif /* __DC_LINK_ENCODER__DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 5b7ad38f85e0..03b4ac2f1991 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -377,7 +377,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
*/
REG_WAIT(DP_DPHY_SYM32_STATUS,
SAT_UPDATE_PENDING, 0,
- 10, DP_SAT_UPDATE_MAX_RETRY);
+ 100, DP_SAT_UPDATE_MAX_RETRY);
}
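
REG_WAIT above polls SAT_UPDATE_PENDING until it clears, sleeping a fixed interval between reads up to DP_SAT_UPDATE_MAX_RETRY tries, so widening the interval from 10 us to 100 us gives the allocation-table update roughly ten times the total budget. A userspace sketch of that polling shape (the macro arguments are assumed to be expected value, poll interval, and retry cap, matching how it is used here):

#include <stdbool.h>
#include <unistd.h>

/* Poll until read_field() returns 'expected', waiting delay_us
 * between tries; total budget is roughly delay_us * max_try. */
static bool wait_for_field(unsigned int (*read_field)(void),
			   unsigned int expected,
			   unsigned int delay_us, unsigned int max_try)
{
	for (unsigned int i = 0; i < max_try; i++) {
		if (read_field() == expected)
			return true;
		usleep(delay_us);
	}
	return false;
}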
void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
@@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
x),
25));
+ // If y rounds up to integer, carry it over to x.
+ if (y >> 25) {
+ x += 1;
+ y = 0;
+ }
+
switch (stream_encoder_inst) {
case 0:
REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
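
The carry handling above treats y as a 25-bit fraction (the 25 passed to the fixed-point helper): after rounding, a result of exactly 1.0 sets bit 25, which the hardware field cannot hold, so the integer part x absorbs it. A worked example of the same fold:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 25

int main(void)
{
	uint32_t x = 3;                 /* integer slot count */
	uint32_t y = 1u << FRAC_BITS;   /* fraction rounded up to 1.0 */

	if (y >> FRAC_BITS) {           /* carry out of the 25-bit field */
		x += 1;
		y = 0;
	}
	printf("x=%u y=%u\n", x, y);    /* prints x=4 y=0 */
	return 0;
}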
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 45143459eedd..678db949cfe3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -474,6 +474,10 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
&info_frame->hdrsmd,
true);
+ /* packetIndex 4 is used to send immediate SDP messages; please
+ * use other packet indices (such as 5, 6) for other info packets
+ */
+
if (info_frame->adaptive_sync.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 5b5b5e0775fa..b906db6e7355 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -172,7 +172,7 @@ static uint32_t convert_and_clamp(
static bool hubbub31_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -362,7 +362,7 @@ static bool hubbub31_program_urgent_watermarks(
static bool hubbub31_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -635,7 +635,7 @@ static bool hubbub31_program_stutter_watermarks(
static bool hubbub31_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -717,7 +717,7 @@ static bool hubbub31_program_pstate_watermarks(
static bool hubbub31_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 281be20b1a10..20c6fe48567f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -173,5 +173,12 @@ void dcn31_panel_cntl_construct(
break;
}
- dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
+ if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1)
+ //If supported, power sequencer mapping shall follow the DIG instance
+ dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
+ else
+ /* If not supported, pwrseq will be assigned in order, so the
+ * first pwrseq goes to the first panel instance (legacy behavior)
+ */
+ dcn31_panel_cntl->base.pwrseq_inst = dcn31_panel_cntl->base.inst;
}
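
The branch above reduces to a one-line policy: follow the DIG-derived pwrseq_inst when eDP0-on-DP1 is supported, otherwise fall back to the panel instance. A hypothetical selector making that explicit:

#include <stdbool.h>

/* Hypothetical helper mirroring the branch above: DIG-based mapping
 * when supported, legacy 1:1 panel-order mapping otherwise. */
static unsigned int pick_pwrseq_inst(bool support_edp0_on_dp1,
				     unsigned int dig_pwrseq_inst,
				     unsigned int panel_inst)
{
	return support_edp0_on_dp1 ? dig_pwrseq_inst : panel_inst;
}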
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
index f1deb1c3c363..cfb923d85630 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
@@ -63,7 +63,12 @@ void vpg31_poweron(struct vpg *vpg)
{
struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg);
- if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false)
+ uint32_t vpg_gsp_mem_pwr_state;
+
+ REG_GET(VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, &vpg_gsp_mem_pwr_state);
+
+ if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false &&
+ vpg_gsp_mem_pwr_state == 0)
return;
REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0);
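
The reworked gate above only skips the power-up writes when both conditions hold: VPG memory low-power handling is disabled and VPG_GSP_MEM_PWR_STATE already reads 0. Expressed as a predicate, under the assumption that state 0 means the memory is powered up:

#include <stdbool.h>

/* True when vpg31_poweron should run its wake-up sequence. */
static bool vpg_needs_poweron(bool mem_low_power_en,
			      unsigned int gsp_mem_pwr_state)
{
	/* skip only if low-power is off AND memory is already on */
	return mem_low_power_en || gsp_mem_pwr_state != 0;
}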
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
index 0e76eabce441..609e58dbd056 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
@@ -26,6 +26,7 @@
#ifndef __DAL_DCN31_VPG_H__
#define __DAL_DCN31_VPG_H__
+#include "vpg.h"
#define DCN31_VPG_FROM_VPG(vpg)\
container_of(vpg, struct dcn31_vpg, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
index 5314770fff1c..a58c37165f5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
@@ -11,7 +11,7 @@
# Makefile for dcn32.
DCN32 = dcn32_hubbub.o dcn32_dccg.o \
- dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \
+ dcn32_mmhubbub.o dcn32_hubp.o dcn32_mpc.o \
dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
dcn32_hpo_dp_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index e224a028d68a..d9ff95cd2dbd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -251,8 +251,6 @@ void dcn32_link_encoder_construct(
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index 2d5f25290ed1..35d23d9db45e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -26,15 +26,7 @@
#ifndef __DC_LINK_ENCODER__DCN32_H__
#define __DC_LINK_ENCODER__DCN32_H__
-#include "dcn31/dcn31_dio_link_encoder.h"
-
-#define LE_DCN32_REG_LIST(id)\
- LE_DCN31_REG_LIST(id),\
- SRI(DIG_FIFO_CTRL0, DIG, id)
-
-#define LINK_ENCODER_MASK_SH_LIST_DCN32(mask_sh) \
- LINK_ENCODER_MASK_SH_LIST_DCN31(mask_sh),\
- LE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
+#include "dcn30/dcn30_dio_link_encoder.h"
void dcn32_link_encoder_construct(
struct dcn20_link_encoder *enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 1be5410cce97..ca53d39561d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -177,11 +177,12 @@
SE_SF(DIG0_DIG_FE_CNTL, DIG_SYMCLK_FE_ON, mask_sh),\
SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\
SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET, mask_sh),\
- SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
- SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
+ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh)
+
void dcn32_dio_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 88dfc907553d..515c4c2b4c21 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -167,7 +167,7 @@ static uint32_t convert_and_clamp(
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -357,7 +357,7 @@ bool hubbub32_program_urgent_watermarks(
bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -503,7 +503,7 @@ bool hubbub32_program_stutter_watermarks(
bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -656,7 +656,7 @@ bool hubbub32_program_pstate_watermarks(
bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -750,7 +750,7 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
static bool hubbub32_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index f073839a4b6d..e439ba0fa30f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -118,25 +118,25 @@
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index e789e654c387..e408e859b355 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc)
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
- mpc1_mpc_init(mpc);
+ mpc3_mpc_init(mpc);
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) {
@@ -991,7 +991,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc32_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1008,7 +1008,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc32_program_shaper,
.program_3dlut = mpc32_program_3dlut,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index f98def6c8c2d..fbcd6f7bc993 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -35,25 +35,6 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
-
-uint32_t dcn32_helper_mall_bytes_to_ways(
- struct dc *dc,
- uint32_t total_size_in_mall_bytes)
-{
- uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
-
- /* add 2 lines for worst case alignment */
- cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
-
- total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
- lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
- num_ways = cache_lines_used / lines_per_way;
- if (cache_lines_used % lines_per_way > 0)
- num_ways++;
-
- return num_ways;
-}
-
uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -112,8 +93,10 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(
if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
if (dc->debug.force_subvp_num_ways) {
return dc->debug.force_subvp_num_ways;
+ } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) {
+ return dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
} else {
- return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
+ return 0;
}
} else {
return 0;
@@ -399,7 +382,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
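The removed dcn32_helper_mall_bytes_to_ways() is now reached through the res_pool->funcs->calculate_mall_ways_from_bytes hook, as the caller hunk above shows. For reference, the computation it performed was a ceiling division of cache lines over lines-per-way; a standalone restatement with illustrative numbers (64 B lines, a 4 MiB CAB, and 16 ways give 4096 lines per way, so a 1 MiB allocation needs 16384 + 2 = 16386 lines, i.e. 5 ways):

	#include <stdint.h>

	static uint32_t mall_bytes_to_ways(uint32_t bytes, uint32_t line_size,
					   uint32_t cab_bytes, uint32_t num_ways)
	{
		uint32_t lines_used = bytes / line_size + 2;	/* +2: worst-case alignment */
		uint32_t lines_per_way = (cab_bytes / line_size) / num_ways;

		return (lines_used + lines_per_way - 1) / lines_per_way;	/* ceiling */
	}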
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index 13be5f06d987..05783daa62ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -127,11 +127,6 @@ void dcn321_link_encoder_construct(
* while doing the DP sink detect
*/
-/* if (dal_adapter_service_is_feature_supported(as,
- FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
- enc10->base.features.flags.bits.
- DP_SINK_DETECT_POLL_DATA_PIN = true;*/
-
enc10->base.output_signals =
SIGNAL_TYPE_DVI_SINGLE_LINK |
SIGNAL_TYPE_DVI_DUAL_LINK |
@@ -191,7 +186,6 @@ void dcn321_link_encoder_construct(
__func__,
result);
}
- if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ if (enc10->base.ctx->dc->debug.hdmi20_disable)
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
index 0e317e0c36a0..d5b4533d2f62 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
@@ -13,7 +13,7 @@
DCN35 = dcn35_dio_stream_encoder.o \
dcn35_dio_link_encoder.o dcn35_dccg.o \
dcn35_hubp.o dcn35_hubbub.o \
- dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
+ dcn35_mmhubbub.o dcn35_opp.o dcn35_pg_cntl.o dcn35_dwb.o
AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
index f1ba7bb792ea..58dd3c5bbff0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
@@ -49,15 +49,23 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
switch (dpp_inst) {
case 0:
REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
break;
case 1:
REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
break;
case 2:
REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
break;
case 3:
REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
break;
default:
break;
@@ -100,6 +108,32 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
+static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
+ uint32_t dpp_inst, uint32_t enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ return;
+
+ switch (dpp_inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
+ break;
+ default:
+ break;
+ }
+}
+
static void dccg35_get_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -333,21 +367,67 @@ static void dccg35_set_dpstreamclk(
/* enabled to select one of the DTBCLKs for pipe */
switch (dp_hpo_inst) {
case 0:
- REG_UPDATE_2(DPSTREAMCLK_CNTL,
- DPSTREAMCLK0_EN,
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+
+static void dccg35_set_dpstreamclk_root_clock_gating(
+ struct dccg *dccg,
+ int dp_hpo_inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (dp_hpo_inst) {
+ case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, enable ? 1 : 0);
+ }
+ break;
+ case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, enable ? 1 : 0);
+ }
+ break;
+ case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, enable ? 1 : 0);
+ }
+ break;
+ case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, enable ? 1 : 0);
+ }
break;
default:
BREAK_TO_DEBUGGER();
@@ -355,6 +435,8 @@ static void dccg35_set_dpstreamclk(
}
}
+
+
static void dccg35_set_physymclk_root_clock_gating(
struct dccg *dccg,
int phy_inst,
@@ -369,22 +451,32 @@ static void dccg35_set_physymclk_root_clock_gating(
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYA_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 1:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYB_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 2:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYC_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 3:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYD_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
case 4:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYE_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
break;
default:
BREAK_TO_DEBUGGER();
@@ -407,10 +499,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
PHYASYMCLK_EN, 1,
PHYASYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYA_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
PHYASYMCLK_EN, 0,
PHYASYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYA_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 1:
@@ -418,10 +516,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
PHYBSYMCLK_EN, 1,
PHYBSYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYB_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
PHYBSYMCLK_EN, 0,
PHYBSYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYB_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 2:
@@ -429,10 +533,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
PHYCSYMCLK_EN, 1,
PHYCSYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYC_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
PHYCSYMCLK_EN, 0,
PHYCSYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYC_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 3:
@@ -440,10 +550,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
PHYDSYMCLK_EN, 1,
PHYDSYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYD_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
PHYDSYMCLK_EN, 0,
PHYDSYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYD_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
case 4:
@@ -451,10 +567,16 @@ static void dccg35_set_physymclk(
REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
PHYESYMCLK_EN, 1,
PHYESYMCLK_SRC_SEL, clk_src);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYE_REFCLK_ROOT_GATE_DISABLE, 0);
} else {
REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
PHYESYMCLK_EN, 0,
PHYESYMCLK_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4,
+// PHYE_REFCLK_ROOT_GATE_DISABLE, 1);
}
break;
default:
@@ -491,12 +613,12 @@ static void dccg35_dpp_root_clock_control(
if (clock_on) {
/* turn off the DTO and leave phase/modulo at max */
- dcn35_set_dppclk_enable(dccg, dpp_inst, 0);
+ dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0xFF,
DPPCLK0_DTO_MODULO, 0xFF);
} else {
- dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
+ dcn35_set_dppclk_enable(dccg, dpp_inst, 0);
/* turn on the DTO to generate a 0hz clock */
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
@@ -575,18 +697,32 @@ void dccg35_init(struct dccg *dccg)
dccg35_disable_symclk32_se(dccg, otg_inst);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- for (otg_inst = 0; otg_inst < 2; otg_inst++)
+ for (otg_inst = 0; otg_inst < 2; otg_inst++) {
dccg31_disable_symclk32_le(dccg, otg_inst);
+ dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false);
+ }
+
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// for (otg_inst = 0; otg_inst < 4; otg_inst++)
+// dccg35_disable_symclk_se(dccg, otg_inst, otg_inst);
+
if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
- for (otg_inst = 0; otg_inst < 4; otg_inst++)
- dccg314_set_dpstreamclk(dccg, REFCLK, otg_inst,
+ for (otg_inst = 0; otg_inst < 4; otg_inst++) {
+ dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst,
otg_inst);
+ dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
+ }
if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
for (otg_inst = 0; otg_inst < 5; otg_inst++)
dccg35_set_physymclk_root_clock_gating(dccg, otg_inst,
false);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ for (otg_inst = 0; otg_inst < 4; otg_inst++)
+ dccg35_set_dppclk_root_clock_gating(dccg, otg_inst, 0);
+
/*
dccg35_enable_global_fgcg_rep(
dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits
@@ -611,24 +747,32 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
break;
case 1:
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
break;
case 2:
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
break;
case 3:
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -650,24 +794,32 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0);
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0);
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0);
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 0);
break;
default:
return;
@@ -682,22 +834,32 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst,
case 0:
REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1);
break;
case 1:
REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1);
break;
case 2:
REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1);
break;
case 3:
REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1);
break;
case 4:
REG_UPDATE(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_CLOCK_ENABLE, 1);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 1);
break;
}
@@ -706,26 +868,36 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst,
REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_FE_EN, 1,
SYMCLKA_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 1);
break;
case 1:
REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_FE_EN, 1,
SYMCLKB_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 1);
break;
case 2:
REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_FE_EN, 1,
SYMCLKC_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 1);
break;
case 3:
REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_FE_EN, 1,
SYMCLKD_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 1);
break;
case 4:
REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_FE_EN, 1,
SYMCLKE_FE_SRC_SEL, link_enc_inst);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 1);
break;
}
}
@@ -786,26 +958,36 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_FE_EN, 0,
SYMCLKA_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_FE_EN, 0,
SYMCLKB_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_FE_EN, 0,
SYMCLKC_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_FE_EN, 0,
SYMCLKD_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 0);
break;
case 4:
REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_FE_EN, 0,
SYMCLKE_FE_SRC_SEL, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 0);
break;
}
@@ -818,22 +1000,32 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
case 0:
REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
SYMCLKA_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
SYMCLKB_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
SYMCLKC_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
SYMCLKD_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 0);
break;
case 4:
REG_UPDATE(SYMCLKE_CLOCK_ENABLE,
SYMCLKE_CLOCK_ENABLE, 0);
+// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 0);
break;
}
}
@@ -845,6 +1037,7 @@ static const struct dccg_funcs dccg35_funcs = {
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
.dccg_init = dccg35_init,
.set_dpstreamclk = dccg35_set_dpstreamclk,
+ .set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating,
.enable_symclk32_se = dccg31_enable_symclk32_se,
.disable_symclk32_se = dccg35_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
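The new dccg35 gating helpers above all follow one shape: do nothing unless the matching root_clock_optimization debug bit is set, then write the per-instance *_ROOT_GATE_DISABLE field for the requested instance. A reduced sketch of that shape, with the register write stubbed out (write_root_gate_disable() is hypothetical, standing in for REG_UPDATE on the DCCG_GATE_DISABLE_CNTLn registers):

	#include <stdbool.h>

	struct dccg_like { bool root_clock_opt_enabled; };

	static void write_root_gate_disable(struct dccg_like *d, int inst, int v)
	{
		(void)d; (void)inst; (void)v;	/* stand-in for REG_UPDATE */
	}

	static void set_root_clock_gating(struct dccg_like *d, int inst, bool enable)
	{
		if (!d->root_clock_opt_enabled)
			return;			/* optimization off: leave gates alone */
		if (inst < 0 || inst > 3)
			return;			/* mirrors the switch default */
		write_root_gate_disable(d, inst, enable ? 1 : 0);
	}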
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index 81e349d5835b..20f810a6646c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -80,7 +80,6 @@ enum signal_type dcn35_get_dig_mode(
default:
return SIGNAL_TYPE_NONE;
}
- return SIGNAL_TYPE_NONE;
}
void dcn35_link_encoder_setup(
@@ -119,7 +118,7 @@ void dcn35_link_encoder_setup(
void dcn35_link_encoder_init(struct link_encoder *enc)
{
- enc32_hw_init(enc);
+ enc31_hw_init(enc);
dcn35_link_encoder_set_fgcg(enc, enc->ctx->dc->debug.enable_fine_grain_clock_gating.bits.dio);
}
@@ -184,6 +183,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -238,8 +239,6 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
index e1e560732a9d..d546a3676304 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
@@ -37,7 +37,9 @@
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_CLK_EN, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SOFT_RESET, mask_sh),\
+ LE_SF(DIG0_DIG_BE_CLK_CNTL, HDCP_SOFT_RESET, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_CLOCK_ON, mask_sh),\
+ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_HDCP_CLOCK_ON, mask_sh),\
LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_TMDS_CLOCK_ON, mask_sh),\
LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
@@ -114,7 +116,15 @@
LE_SF(DIO_CLK_CNTL, SYMCLK_FE_G_GATE_DIS, mask_sh),\
LE_SF(DIO_CLK_CNTL, SYMCLK_R_GATE_DIS, mask_sh),\
LE_SF(DIO_CLK_CNTL, SYMCLK_G_GATE_DIS, mask_sh),\
- LE_SF(DIO_CLK_CNTL, DIO_FGCG_REP_DIS, mask_sh)
+ LE_SF(DIO_CLK_CNTL, DIO_FGCG_REP_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, DISPCLK_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKA_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKB_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKC_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKD_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKE_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKF_G_HDCP_GATE_DIS, mask_sh),\
+ LE_SF(DIO_CLK_CNTL, SYMCLKG_G_HDCP_GATE_DIS, mask_sh)
void dcn35_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
index 499052329ebb..1212fcee38f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
@@ -28,7 +28,6 @@
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "stream_encoder.h"
-#include "dcn10/dcn10_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
/* Register bit field name change */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c
index 339bf0c722dd..6293173ba2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c
@@ -111,7 +111,7 @@ static uint32_t convert_and_clamp(
static bool hubbub35_program_stutter_z8_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
@@ -297,7 +297,7 @@ static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
static bool hubbub35_program_watermarks(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 6d7a15dcf8a7..34adae7ab6e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -36,6 +36,7 @@
struct dc_dp_mst_stream_allocation_table;
struct aux_payload;
enum aux_return_code_type;
+enum set_config_status;
/*
* Allocate memory accessible by the GPU
@@ -200,7 +201,7 @@ int dm_helper_dmub_aux_transfer_sync(
const struct dc_link *link,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
-enum set_config_status;
+
int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
const struct dc_link *link,
struct set_config_cmd_payload *payload,
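The dm_helpers.h hunk hoists the `enum set_config_status;` forward declaration above the prototypes that mention it: the declaration must be visible before first use, and the incomplete-enum form is a GCC extension the kernel relies on. In miniature, with hypothetical names:

	enum example_status;					/* visible before first use */

	int example_set_config_sync(enum example_status *out);	/* ok: type already introduced */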
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index d0eed3b4771e..9405c47ee2a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -275,6 +275,16 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
#define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX)
/*
+ * SMU message tracing
+ */
+void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx);
+void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx);
+
+#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx)
+#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx)
+
+
+/*
* DMUB Interfaces
*/
bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
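The new SMU tracing hooks above keep instrumentation to a single macro per call site. A hypothetical caller, assuming only what this header declares (struct dc_context and the two macros); the delay argument's units are whatever the tracer expects, since the header does not pin them down:

	/* Hypothetical call site for the new tracing hooks. */
	static void send_smu_msg(struct dc_context *ctx, uint32_t msg, uint32_t param)
	{
		TRACE_SMU_MSG(msg, param, ctx);		/* expands to dm_trace_smu_msg() */
		/* ... write message/param registers, poll for the response ... */
		TRACE_SMU_DELAY(150, ctx);		/* expands to dm_trace_smu_delay() */
	}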
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 38ab9ad60ef8..74da9ebda016 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1085,6 +1085,9 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
/* Don't support multi-plane configurations */
if (stream_status->plane_count > 1)
@@ -1092,8 +1095,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
return DCN_ZSTATE_SUPPORT_ALLOW;
- else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
- return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ else if (is_pwrseq0 && (is_psr || is_replay))
+ return DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY;
else
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
} else {
@@ -2369,7 +2372,7 @@ validate_out:
static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl)
{
- struct _vcs_dpi_voltage_scaling_st low_pstate_lvl;
+ struct _vcs_dpi_voltage_scaling_st low_pstate_lvl = {0};
int i;
low_pstate_lvl.state = 1;
@@ -2474,7 +2477,7 @@ void dcn201_populate_dml_writeback_from_context_fpu(struct dc *dc,
int pipe_cnt, i, j;
double max_calc_writeback_dispclk;
double writeback_dispclk;
- struct writeback_st dout_wb;
+ struct writeback_st dout_wb = {0};
dc_assert_fp_enabled();
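Several hunks in this file (and in the dcn30/dcn32 DML files below) change on-stack declarations like `struct writeback_st dout_wb;` to `= {0}`: value-initialization guarantees every member starts at zero, so code paths that fill in only some fields can no longer leak stack garbage into DML inputs. The pattern in isolation:

	struct writeback_like { double dispclk_mhz; int enabled; };

	static struct writeback_like make_wb(double clk)
	{
		struct writeback_like wb = {0};	/* all members zeroed */

		wb.dispclk_mhz = clk;		/* 'enabled' is 0, not garbage */
		return wb;
	}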
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index ccb4ad78f667..81f7b90849ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -260,7 +260,7 @@ void dcn30_fpu_populate_dml_writeback_from_context(
int pipe_cnt, i, j;
double max_calc_writeback_dispclk;
double writeback_dispclk;
- struct writeback_st dout_wb;
+ struct writeback_st dout_wb = {0};
dc_assert_fp_enabled();
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index e7f4a2d491cc..e0b52db2c210 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3535,7 +3535,6 @@ static double TruncToValidBPP(
return DesiredBPP;
}
}
- return BPP_INVALID;
}
void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index deb6d162a2d5..59a902313200 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -485,6 +485,7 @@ void dcn31_calculate_wm_and_dlg_fp(
{
int i, pipe_idx, total_det = 0, active_hubp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ uint32_t cstate_enter_plus_exit_z8_ns;
dc_assert_fp_enabled();
@@ -504,6 +505,13 @@ void dcn31_calculate_wm_and_dlg_fp(
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+ cstate_enter_plus_exit_z8_ns =
+ get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ if (get_stutter_period(&context->bw_ctx.dml, pipes, pipe_cnt) < dc->debug.minimum_z8_residency_time &&
+ cstate_enter_plus_exit_z8_ns < dc->debug.minimum_z8_residency_time * 1000)
+ cstate_enter_plus_exit_z8_ns = dc->debug.minimum_z8_residency_time * 1000;
+
/* Set A:
* All clocks min required
*
@@ -514,7 +522,7 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
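The dcn31 change above computes the Z8 enter-plus-exit watermark once, then raises it to the configured floor when both the stutter period (in microseconds) and the computed watermark (in nanoseconds) fall below minimum_z8_residency_time. Restated with the unit handling explicit, and with illustrative values: given a 1000 us floor, an 800 us stutter period and a computed 320 us (320000 ns) enter+exit come back as 1000000 ns.

	#include <stdint.h>

	static uint32_t clamp_z8_watermark(uint32_t enter_exit_ns,
					   double stutter_period_us,
					   int min_residency_us)
	{
		if (stutter_period_us < min_residency_us &&
		    enter_exit_ns < (uint32_t)min_residency_us * 1000)
			return (uint32_t)min_residency_us * 1000;

		return enter_exit_ns;
	}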
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 8f9c8faed260..d2ae43a82ba5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -30,6 +30,7 @@
#define DCN3_15_DEFAULT_DET_SIZE 192
#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
#define DCN3_16_DEFAULT_DET_SIZE 192
+#define DCN3_16_MIN_COMPBUF_SIZE_KB 128
void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index adea459e7d36..33cf824c5da1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -3679,7 +3679,6 @@ static double TruncToValidBPP(
return DesiredBPP;
}
}
- return BPP_INVALID;
}
static noinline void CalculatePrefetchSchedulePerPlane(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index fb21572750e8..21f637ae4add 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -310,7 +310,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 88e56889a68c..3242957d00c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -3788,7 +3788,6 @@ static double TruncToValidBPP(
return DesiredBPP;
}
}
- return BPP_INVALID;
}
static noinline void CalculatePrefetchSchedulePerPlane(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index a0a65e099104..f6fe0a64beac 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -180,6 +180,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
+static bool dcn32_apply_merge_split_flags_helper(struct dc *dc, struct dc_state *context,
+ bool *repopulate_pipes, int *split, bool *merge);
+
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
{
/* defaults */
@@ -622,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* to combine this with SubVP can cause issues with the scheduling).
* - Not TMZ surface
*/
- if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+ if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
@@ -720,7 +723,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
*/
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
- struct pipe_ctx *subvp_pipes[2];
+ struct pipe_ctx *subvp_pipes[2] = {0};
struct dc_stream_state *phantom = NULL;
uint32_t microschedule_lines = 0;
uint32_t index = 0;
@@ -1425,13 +1428,14 @@ static bool is_test_pattern_enabled(
return false;
}
-static void dcn32_full_validate_bw_helper(struct dc *dc,
+static bool dcn32_full_validate_bw_helper(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *vlevel,
int *split,
bool *merge,
- int *pipe_cnt)
+ int *pipe_cnt,
+ bool *repopulate_pipes)
{
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
unsigned int dc_pipe_idx = 0;
@@ -1461,6 +1465,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
vba->VoltageLevel = *vlevel;
}
+ /* Apply split and merge flags before checking for subvp */
+ if (!dcn32_apply_merge_split_flags_helper(dc, context, repopulate_pipes, split, merge))
+ return false;
+ memset(split, 0, MAX_PIPES * sizeof(int));
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
* 2. Full update (i.e. !fast_validate)
@@ -1475,19 +1485,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
- dcn32_merge_pipes_for_subvp(dc, context);
- memset(merge, 0, MAX_PIPES * sizeof(bool));
-
vlevel_temp = *vlevel;
- /* to re-initialize viewport after the pipe merge */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->plane_state || !pipe_ctx->stream)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
@@ -1576,8 +1574,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* add phantom pipes. If pipe split (ODM / MPC) is required, both the main
* and phantom pipes will be split in the regular pipe splitting sequence.
*/
- memset(split, 0, MAX_PIPES * sizeof(int));
- memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
vba->VoltageLevel = *vlevel;
// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
@@ -1590,6 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
try_odm_power_optimization_and_revalidate(
dc, context, pipes, split, merge, vlevel, *pipe_cnt);
+ return true;
}
static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
@@ -1929,106 +1926,23 @@ static bool dcn32_split_stream_for_mpc_or_odm(
return true;
}
-bool dcn32_internal_validate_bw(struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *pipe_cnt_out,
- int *vlevel_out,
- bool fast_validate)
+static bool dcn32_apply_merge_split_flags_helper(
+ struct dc *dc,
+ struct dc_state *context,
+ bool *repopulate_pipes,
+ int *split,
+ bool *merge)
{
- bool out = false;
- bool repopulate_pipes = false;
- int split[MAX_PIPES] = { 0 };
- bool merge[MAX_PIPES] = { false };
+ int i, pipe_idx;
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx;
- int vlevel = context->bw_ctx.dml.soc.num_states;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
- dc_assert_fp_enabled();
-
- ASSERT(pipes);
- if (!pipes)
- return false;
-
- // For each full update, remove all existing phantom pipes first
- dc_state_remove_phantom_streams_and_planes(dc, context);
- dc_state_release_phantom_streams_and_planes(dc, context);
-
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
-
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
-
- if (!pipe_cnt) {
- out = true;
- goto validate_out;
- }
-
- dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
- context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
-
- if (!fast_validate)
- dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
-
- if (fast_validate ||
- (dc->debug.dml_disallow_alternate_prefetch_modes &&
- (vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
- /*
- * If dml_disallow_alternate_prefetch_modes is false, then we have already
- * tried alternate prefetch modes during full validation.
- *
- * If mode is unsupported or there is no p-state support, then
- * fall back to favouring voltage.
- *
- * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
- * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
- */
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_none;
-
- context->bw_ctx.dml.validate_max_state = fast_validate;
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- context->bw_ctx.dml.validate_max_state = false;
-
- if (vlevel < context->bw_ctx.dml.soc.num_states) {
- memset(split, 0, sizeof(split));
- memset(merge, 0, sizeof(merge));
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
- // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML
- vba->VoltageLevel = vlevel;
- }
- }
-
- dml_log_mode_support_params(&context->bw_ctx.dml);
-
- if (vlevel == context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
-
- if (!pipe->stream)
- continue;
-
- if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
- && !dc->config.enable_windowed_mpo_odm
- && pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_state->clip_rect,
- &pipe->stream->src,
- sizeof(struct rect)) != 0) {
- ASSERT(mpo_pipe->plane_state != pipe->plane_state);
- goto validate_fail;
- }
- pipe_idx++;
- }
-
if (dc->config.enable_windowed_mpo_odm) {
- repopulate_pipes = update_pipes_with_split_flags(
- dc, context, vba, split, merge);
+ if (update_pipes_with_split_flags(
+ dc, context, vba, split, merge))
+ *repopulate_pipes = true;
} else {
+
/* the code below will be removed once windowed mpo odm is fully
* enabled.
*/
@@ -2085,7 +1999,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
memset(&pipe->link_res, 0, sizeof(pipe->link_res));
- repopulate_pipes = true;
+ *repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
@@ -2101,7 +2015,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
memset(&pipe->link_res, 0, sizeof(pipe->link_res));
- repopulate_pipes = true;
+ *repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
@@ -2140,15 +2054,15 @@ bool dcn32_internal_validate_bw(struct dc *dc,
hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(hsplit_pipe);
if (!hsplit_pipe)
- goto validate_fail;
+ return false;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, hsplit_pipe, odm))
- goto validate_fail;
+ return false;
newly_split[hsplit_pipe->pipe_idx] = true;
- repopulate_pipes = true;
+ *repopulate_pipes = true;
}
if (split[i] == 4) {
struct pipe_ctx *pipe_4to1;
@@ -2163,11 +2077,11 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
- goto validate_fail;
+ return false;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
pipe, pipe_4to1, odm))
- goto validate_fail;
+ return false;
newly_split[pipe_4to1->pipe_idx] = true;
if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
@@ -2182,11 +2096,11 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
- goto validate_fail;
+ return false;
if (!dcn32_split_stream_for_mpc_or_odm(
dc, &context->res_ctx,
hsplit_pipe, pipe_4to1, odm))
- goto validate_fail;
+ return false;
newly_split[pipe_4to1->pipe_idx] = true;
}
if (odm)
@@ -2198,11 +2112,122 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (pipe->plane_state) {
if (!resource_build_scaling_params(pipe))
- goto validate_fail;
+ return false;
}
}
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
+ context->streams[i]);
+
+ if (otg_master)
+ resource_build_test_pattern_params(&context->res_ctx, otg_master);
+ }
+ }
+ return true;
+}
+
+bool dcn32_internal_validate_bw(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+ bool fast_validate)
+{
+ bool out = false;
+ bool repopulate_pipes = false;
+ int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx;
+ int vlevel = context->bw_ctx.dml.soc.num_states;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ dc_assert_fp_enabled();
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ /* For each full update, remove all existing phantom pipes first */
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
+
+ dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+ context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
+
+ if (!fast_validate) {
+ if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge,
+ &pipe_cnt, &repopulate_pipes))
+ goto validate_fail;
+ }
+
+ if (fast_validate ||
+ (dc->debug.dml_disallow_alternate_prefetch_modes &&
+ (vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
+ /*
+ * If dml_disallow_alternate_prefetch_modes is false, then we have already
+ * tried alternate prefetch modes during full validation.
+ *
+ * If mode is unsupported or there is no p-state support, then
+ * fall back to favouring voltage.
+ *
+ * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+ * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+ */
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_none;
+
+ context->bw_ctx.dml.validate_max_state = fast_validate;
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ context->bw_ctx.dml.validate_max_state = false;
+
+ if (vlevel < context->bw_ctx.dml.soc.num_states) {
+ memset(split, 0, sizeof(split));
+ memset(merge, 0, sizeof(merge));
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ /* dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML */
+ vba->VoltageLevel = vlevel;
+ }
}
+ dml_log_mode_support_params(&context->bw_ctx.dml);
+
+ if (vlevel == context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+
+ if (!pipe->stream)
+ continue;
+
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && !dc->config.enable_windowed_mpo_odm
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
+ }
+
+ if (!dcn32_apply_merge_split_flags_helper(dc, context, &repopulate_pipes, split, merge))
+ goto validate_fail;
+
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
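The large dcn32_fpu.c rework above extracts the merge/split application out of dcn32_internal_validate_bw() into dcn32_apply_merge_split_flags_helper(). A helper cannot `goto validate_fail` in its caller, so every former goto becomes `return false`, and the caller translates a false return back into the goto. The shape of the transformation:

	#include <stdbool.h>

	static bool apply_flags(const int *split, const bool *merge)
	{
		if (!split || !merge)
			return false;	/* was: goto validate_fail */
		/* ... split/merge pipes, rebuild scaling params ... */
		return true;
	}

	static bool validate(const int *split, const bool *merge)
	{
		if (!apply_flags(split, merge))
			goto validate_fail;
		return true;

	validate_fail:
		return false;	/* shared failure path stays in the caller */
	}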
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 80fccd4999a5..ba1310c8fd77 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1650,6 +1650,8 @@ double dml32_TruncToValidBPP(
MaxLinkBPP = 2 * MaxLinkBPP;
}
+ *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1);
+
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP)
@@ -1676,10 +1678,6 @@ double dml32_TruncToValidBPP(
else
return DesiredBPP;
}
-
- *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1);
-
- return BPP_INVALID;
} // TruncToValidBPP
double dml32_RequiredDTBCLK(
@@ -1975,8 +1973,8 @@ void dml32_CalculateVMRowAndSwath(
unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
unsigned int PDEAndMetaPTEBytesFrameY;
unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
- unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+ unsigned int MetaRowByteY[DC__NUM_DPP__MAX] = {0};
+ unsigned int MetaRowByteC[DC__NUM_DPP__MAX] = {0};
unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
@@ -4291,7 +4289,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
unsigned int i, j, k;
unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
unsigned int DRAMClockChangeSupportNumber = 0;
- unsigned int LastSurfaceWithoutMargin;
+ unsigned int LastSurfaceWithoutMargin = 0;
unsigned int DRAMClockChangeMethod = 0;
bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
double MinActiveFCLKChangeMargin = 0.;
@@ -5656,9 +5654,9 @@ void dml32_CalculateStutterEfficiency(
double LastZ8StutterPeriod = 0.0;
double LastStutterPeriod = 0.0;
unsigned int TotalNumberOfActiveOTG = 0;
- double doublePixelClock;
- unsigned int doubleHTotal;
- unsigned int doubleVTotal;
+ double doublePixelClock = 0;
+ unsigned int doubleHTotal = 0;
+ unsigned int doubleVTotal = 0;
bool SameTiming = true;
double DETBufferingTimeY;
double SwathWidthYCriticalSurface = 0.0;
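Both TruncToValidBPP variants patched in this series had the same defect: the *RequiredSlots assignment sat below an if/else chain in which every path returns, so it (and the trailing `return BPP_INVALID;`) was unreachable and callers never received the value. The fix hoists the assignment above the returns, which is also why the dead `return BPP_INVALID;` lines disappear from the vba_30/31/314 copies above. The general pattern:

	#include <math.h>

	static double trunc_to_valid_bpp(double desired_bpp, double max_link_bpp,
					 unsigned int *required_slots)
	{
		/* Set the out-parameter first: every exit leaves it defined. */
		*required_slots = (unsigned int)ceil(desired_bpp / max_link_bpp * 64.0);

		if (desired_bpp == 0.0)
			return max_link_bpp;	/* early return; slots already set */

		return desired_bpp;
	}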
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 80bebfc268db..add169162f2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -166,8 +166,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.num_states = 5,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
- .sr_exit_z8_time_us = 210.0,
- .sr_enter_plus_exit_z8_time_us = 320.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
.fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -439,7 +439,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
@@ -577,6 +577,7 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
{
enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
unsigned int i, plane_count = 0;
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
@@ -602,11 +603,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
if (is_pwrseq0 && allow_z10)
support = DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && (is_psr || is_replay))
- support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY;
else if (allow_z8)
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
}
+ DC_LOG_SMU("zstate_support: %d, StutterPeriod: %d\n", support,
+ (int)context->bw_ctx.dml.vba.StutterPeriod);
+
context->bw_ctx.bw.dcn.clk.zstate_support = support;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index dc9e1b758ed6..e4f333d4fb54 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -98,55 +98,114 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.clock_limits = {
{
.state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .socclk_mhz = 600.0,
+ .dram_speed_mts = 3200.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 600.0,
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
+ .dscclk_mhz = 200.0,
.dtbclk_mhz = 600.0,
},
{
.state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 600.0,
+ .fabricclk_mhz = 1000.0,
+ .socclk_mhz = 733.0,
+ .dram_speed_mts = 6400.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
+ .dscclk_mhz = 266.7,
.dtbclk_mhz = 600.0,
},
{
.state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 738.0,
+ .fabricclk_mhz = 1200.0,
+ .socclk_mhz = 880.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
+ .dscclk_mhz = 266.7,
.dtbclk_mhz = 600.0,
},
{
.state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 800.0,
+ .fabricclk_mhz = 1400.0,
+ .socclk_mhz = 978.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 960.0,
+ .dppclk_mhz = 960.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
+ .dscclk_mhz = 320.0,
.dtbclk_mhz = 600.0,
},
{
.state = 4,
+ .dcfclk_mhz = 873.0,
+ .fabricclk_mhz = 1600.0,
+ .socclk_mhz = 1100.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1066.7,
+ .dppclk_mhz = 1066.7,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 355.6,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 960.0,
+ .fabricclk_mhz = 1700.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
+ .dscclk_mhz = 400.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 1067.0,
+ .fabricclk_mhz = 1850.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1371.4,
+ .dppclk_mhz = 1371.4,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 457.1,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 2000.0,
+ .socclk_mhz = 1467.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1600.0,
+ .dppclk_mhz = 1600.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 533.3,
.dtbclk_mhz = 600.0,
},
},
- .num_states = 5,
+ .num_states = 8,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
- .sr_exit_z8_time_us = 210.0,
- .sr_enter_plus_exit_z8_time_us = 320.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
.fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -177,6 +236,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
+ .dispclk_dppclk_vco_speed_mhz = 2400.0,
};
/*
@@ -340,6 +401,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
@@ -352,6 +415,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+ clk_table->num_entries;
}
}
@@ -408,7 +473,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = 0;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
@@ -551,6 +616,7 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
}
+
/*dcn351 does not support z9/z10*/
if (context->stream_count == 0 || plane_count == 0) {
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
@@ -564,11 +630,9 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
-
/* for psr1/psr-su, we allow z8 and z10 based on latency; for replay with IPS enabled, it will enter ips2 */
- if (is_pwrseq0 && (is_psr || is_replay))
+ if (is_pwrseq0 && (is_psr || is_replay))
support = allow_z8 ? allow_z8 : DCN_ZSTATE_SUPPORT_DISALLOW;
-
}
context->bw_ctx.bw.dcn.clk.zstate_support = support;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index acff3449b8d7..1c9498a72520 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -67,6 +67,7 @@ frame_warn_flag := -Wframe-larger-than=2048
endif
endif
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 9be5ebf3a8c0..3e919f5c00ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -31,6 +31,8 @@
#include "dml_assert.h"
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
+#define TB_BORROWED_MAX 400
+
// ---------------------------
// Declaration Begins
// ---------------------------
@@ -2782,6 +2784,8 @@ static dml_float_t TruncToValidBPP(
}
}
+ *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1));
+
if (DesiredBPP == 0) {
if (DSCEnable) {
if (MaxLinkBPP < MinDSCBPP) {
@@ -2810,10 +2814,6 @@ static dml_float_t TruncToValidBPP(
return DesiredBPP;
}
}
-
- *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1));
-
- return __DML_DPP_INVALID__;
} // TruncToValidBPP
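The relocated *RequiredSlots assignment now runs before every early return, so callers always receive a value, and the unreachable __DML_DPP_INVALID__ tail return is dropped. A minimal userspace sketch of the slot arithmetic; the 64-slot assumption and the sample numbers are invented for illustration:

#include <math.h>
#include <stdio.h>

/* ceil(bpp / max_link_bpp * 64): slots needed at the desired rate,
 * mirroring the hoisted RequiredSlots computation (illustrative only) */
static unsigned int required_slots(double desired_bpp, double max_link_bpp)
{
	return (unsigned int)ceil(desired_bpp / max_link_bpp * 64.0);
}

int main(void)
{
	printf("%u\n", required_slots(12.0, 36.0)); /* prints 22 */
	return 0;
}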
static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
@@ -3790,9 +3790,9 @@ static void CalculateStutterEfficiency(struct display_mode_lib_scratch_st *scrat
dml_bool_t FoundCriticalSurface = false;
dml_uint_t TotalNumberOfActiveOTG = 0;
- dml_float_t SinglePixelClock;
- dml_uint_t SingleHTotal;
- dml_uint_t SingleVTotal;
+ dml_float_t SinglePixelClock = 0;
+ dml_uint_t SingleHTotal = 0;
+ dml_uint_t SingleVTotal = 0;
dml_bool_t SameTiming = true;
dml_float_t LastStutterPeriod = 0.0;
@@ -9460,8 +9460,10 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
/* Copy the calculated watermarks to mp.Watermark as the getter functions are
* implemented by the DML team to copy the calculated values from the mp.Watermark interface.
+ * &mode_lib->mp.Watermark and &locals->Watermark are the same address; copying
+ * between overlapping regions with memcpy is undefined, so memmove must be used.
*/
- memcpy(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
+ memmove(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
if (mode_lib->ms.cache_display_cfg.writeback.WritebackEnable[k] == true) {
@@ -10214,6 +10216,7 @@ dml_get_var_func(fraction_of_urgent_bandwidth_imm_flip, dml_float_t, mode_lib->m
dml_get_var_func(urgent_latency, dml_float_t, mode_lib->mp.UrgentLatency);
dml_get_var_func(clk_dcf_deepsleep, dml_float_t, mode_lib->mp.DCFCLKDeepSleep);
dml_get_var_func(wm_writeback_dram_clock_change, dml_float_t, mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
+dml_get_var_func(wm_writeback_urgent, dml_float_t, mode_lib->mp.Watermark.WritebackUrgentWatermark);
dml_get_var_func(stutter_efficiency, dml_float_t, mode_lib->mp.StutterEfficiency);
dml_get_var_func(stutter_efficiency_no_vblank, dml_float_t, mode_lib->mp.StutterEfficiencyNotIncludingVBlank);
dml_get_var_func(stutter_efficiency_z8, dml_float_t, mode_lib->mp.Z8StutterEfficiency);
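The memmove swap above is worth a standalone illustration: when source and destination alias, memcpy is undefined behavior, while memmove is guaranteed to copy as if through a temporary buffer. A sketch with invented contents:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "watermarks";

	/* overlapping regions: memmove is well-defined, memcpy is not */
	memmove(buf + 2, buf, 8);
	printf("%s\n", buf); /* prints "wawatermar" */
	return 0;
}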
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
index 8452485684f5..3116b88e99dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
@@ -94,6 +94,7 @@ dml_get_var_decl(wm_usr_retraining, dml_float_t);
dml_get_var_decl(urgent_latency, dml_float_t);
dml_get_var_decl(wm_writeback_dram_clock_change, dml_float_t);
+dml_get_var_decl(wm_writeback_urgent, dml_float_t);
dml_get_var_decl(stutter_efficiency_no_vblank, dml_float_t);
dml_get_var_decl(stutter_efficiency, dml_float_t);
dml_get_var_decl(stutter_efficiency_z8, dml_float_t);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
index de63364be01d..14d389525296 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
@@ -41,6 +41,7 @@
#define DCN_DML__VM_PRESENT__1 1
#define DCN_DML__HOST_VM_PRESENT 1
#define DCN_DML__HOST_VM_PRESENT__1 1
+#define DCN_DML__DWB 1
#include "dml_depedencies.h"
@@ -59,6 +60,7 @@
#define __DML_NUM_PLANES__ DCN_DML__NUM_PLANE
#define __DML_NUM_CURSORS__ DCN_DML__NUM_CURSOR
#define __DML_DPP_INVALID__ 0
+#define __DML_NUM_DMB__ DCN_DML__DWB
#define __DML_PIPE_NO_PLANE__ 99
#define __DML_MAX_STATE_ARRAY_SIZE__ DCN_DML__NUM_PWR_STATE
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index a52c594e1ba4..ad2a6b4769fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -88,7 +88,8 @@ static int find_disp_cfg_idx_by_plane_id(struct dml2_dml_to_dc_pipe_mapping *map
return i;
}
- return -1;
+ ASSERT(false);
+ return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
}
static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int stream_id)
@@ -100,7 +101,8 @@ static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *ma
return i;
}
- return -1;
+ ASSERT(false);
+ return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
}
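Both finders now assert and return __DML2_WRAPPER_MAX_STREAMS_PLANES__ instead of -1. Callers store the result in unsigned index variables, where -1 wraps to 0xFFFFFFFF and indexes far out of bounds; the new sentinel keeps the failure bounded and loud. A sketch of the old failure mode, with names and sizes invented:

#include <stdio.h>

#define MAX_STREAMS_PLANES 6

/* old contract: -1 on miss */
static int find_idx(const unsigned int *ids, unsigned int n, unsigned int id)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (ids[i] == id)
			return (int)i;
	return -1;
}

int main(void)
{
	unsigned int ids[MAX_STREAMS_PLANES] = {7, 9};
	unsigned int idx = find_idx(ids, 2, 42); /* stored unsigned, as callers do */

	printf("idx = %u\n", idx); /* 4294967295: far past the array */
	return 0;
}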
// The master pipe of a stream is defined as the top pipe in odm slice 0
@@ -793,8 +795,8 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id, plane_index);
}
-static unsigned int get_mpc_factor(struct dml2_context *ctx,
- const struct dc_state *state,
+static unsigned int get_target_mpc_factor(struct dml2_context *ctx,
+ struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_stream_status *status,
@@ -805,10 +807,10 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
unsigned int cfg_idx;
unsigned int mpc_factor;
- get_plane_id(ctx, state, status->plane_states[plane_idx],
- stream->stream_id, plane_idx, &plane_id);
- cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
if (ctx->architecture == dml2_architecture_20) {
+ get_plane_id(ctx, state, status->plane_states[plane_idx],
+ stream->stream_id, plane_idx, &plane_id);
+ cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
} else {
mpc_factor = 1;
@@ -822,16 +824,18 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
return mpc_factor;
}
-static unsigned int get_odm_factor(
+static unsigned int get_target_odm_factor(
const struct dml2_context *ctx,
+ struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_stream_state *stream)
{
- unsigned int cfg_idx = find_disp_cfg_idx_by_stream_id(
- mapping, stream->stream_id);
+ unsigned int cfg_idx;
- if (ctx->architecture == dml2_architecture_20)
+ if (ctx->architecture == dml2_architecture_20) {
+ cfg_idx = find_disp_cfg_idx_by_stream_id(
+ mapping, stream->stream_id);
switch (disp_cfg->hw.ODMMode[cfg_idx]) {
case dml_odm_mode_bypass:
return 1;
@@ -842,83 +846,122 @@ static unsigned int get_odm_factor(
default:
break;
}
+ }
ASSERT(false);
return 1;
}
+static unsigned int get_source_odm_factor(const struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ struct pipe_ctx *otg_master = ctx->config.callbacks.get_otg_master_for_stream(&state->res_ctx, stream);
+
+ return ctx->config.callbacks.get_odm_slice_count(otg_master);
+}
+
+static unsigned int get_source_mpc_factor(const struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_plane_state *plane)
+{
+ struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0};
+ int dpp_pipe_count = ctx->config.callbacks.get_dpp_pipes_for_plane(plane,
+ &state->res_ctx, dpp_pipes);
+
+ ASSERT(dpp_pipe_count > 0);
+ return ctx->config.callbacks.get_mpc_slice_count(dpp_pipes[0]);
+}
+
+
static void populate_mpc_factors_for_stream(
struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
- const struct dc_state *state,
+ struct dc_state *state,
unsigned int stream_idx,
- unsigned int odm_factor,
- unsigned int mpc_factors[MAX_PIPES])
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
{
const struct dc_stream_status *status = &state->stream_status[stream_idx];
int i;
- for (i = 0; i < status->plane_count; i++)
- if (odm_factor == 1)
- mpc_factors[i] = get_mpc_factor(
- ctx, state, disp_cfg, mapping, status,
- state->streams[stream_idx], i);
- else
- mpc_factors[i] = 1;
+ for (i = 0; i < status->plane_count; i++) {
+ mpc_factors[i].source = get_source_mpc_factor(ctx, state, status->plane_states[i]);
+ mpc_factors[i].target = (odm_factor.target == 1) ?
+ get_target_mpc_factor(ctx, state, disp_cfg, mapping, status, state->streams[stream_idx], i) : 1;
+ }
}
static void populate_odm_factors(const struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
- const struct dc_state *state,
- unsigned int odm_factors[MAX_PIPES])
+ struct dc_state *state,
+ struct dml2_pipe_combine_factor odm_factors[MAX_PIPES])
{
int i;
- for (i = 0; i < state->stream_count; i++)
- odm_factors[i] = get_odm_factor(
- ctx, disp_cfg, mapping, state->streams[i]);
+ for (i = 0; i < state->stream_count; i++) {
+ odm_factors[i].source = get_source_odm_factor(ctx, state, state->streams[i]);
+ odm_factors[i].target = get_target_odm_factor(
+ ctx, state, disp_cfg, mapping, state->streams[i]);
+ }
}
-static bool map_dc_pipes_for_stream(struct dml2_context *ctx,
+static bool unmap_dc_pipes_for_stream(struct dml2_context *ctx,
struct dc_state *state,
const struct dc_state *existing_state,
const struct dc_stream_state *stream,
const struct dc_stream_status *status,
- unsigned int odm_factor,
- unsigned int mpc_factors[MAX_PIPES])
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
{
int plane_idx;
bool result = true;
- if (odm_factor == 1)
- /*
- * ODM and MPC combines are by DML design mutually exclusive.
- * ODM factor of 1 means MPC factors may be greater than 1.
- * In this case, we want to set ODM factor to 1 first to free up
- * pipe resources from previous ODM configuration before setting
- * up MPC combine to acquire more pipe resources.
- */
+ for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
+ if (mpc_factors[plane_idx].target < mpc_factors[plane_idx].source)
+ result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
+ state,
+ existing_state,
+ ctx->config.callbacks.dc->res_pool,
+ status->plane_states[plane_idx],
+ mpc_factors[plane_idx].target);
+ if (odm_factor.target < odm_factor.source)
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
- odm_factor);
+ odm_factor.target);
+ return result;
+}
+
+static bool map_dc_pipes_for_stream(struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_state *existing_state,
+ const struct dc_stream_state *stream,
+ const struct dc_stream_status *status,
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
+{
+ int plane_idx;
+ bool result = true;
+
for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
- result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
- state,
- existing_state,
- ctx->config.callbacks.dc->res_pool,
- status->plane_states[plane_idx],
- mpc_factors[plane_idx]);
- if (odm_factor > 1)
+ if (mpc_factors[plane_idx].target > mpc_factors[plane_idx].source)
+ result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
+ state,
+ existing_state,
+ ctx->config.callbacks.dc->res_pool,
+ status->plane_states[plane_idx],
+ mpc_factors[plane_idx].target);
+ if (odm_factor.target > odm_factor.source)
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
- odm_factor);
+ odm_factor.target);
return result;
}
@@ -928,20 +971,20 @@ static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_state *existing_state)
{
- unsigned int odm_factors[MAX_PIPES];
- unsigned int mpc_factors_for_stream[MAX_PIPES];
int i;
bool result = true;
- populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors);
- for (i = 0; i < state->stream_count; i++) {
+ populate_odm_factors(ctx, disp_cfg, mapping, state, ctx->pipe_combine_scratch.odm_factors);
+ for (i = 0; i < state->stream_count; i++)
populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state,
- i, odm_factors[i], mpc_factors_for_stream);
- result &= map_dc_pipes_for_stream(ctx, state, existing_state,
- state->streams[i],
- &state->stream_status[i],
- odm_factors[i], mpc_factors_for_stream);
- }
+ i, ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+ for (i = 0; i < state->stream_count; i++)
+ result &= unmap_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i],
+ &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+ for (i = 0; i < state->stream_count; i++)
+ result &= map_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i],
+ &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+
return result;
}
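The rework splits pipe updates into a release pass (unmap_dc_pipes_for_stream, shrinking any stream or plane whose target factor is below its current source factor) followed by an acquire pass, replacing the old single pass that only special-cased ODM. Running all shrinks first guarantees the shared pipe pool holds every freed pipe before anything tries to grow. A self-contained sketch of the ordering argument, with an invented 8-pipe pool:

#include <stdio.h>

#define NSTREAMS 2

int main(void)
{
	int source[NSTREAMS] = {4, 1};	/* pipes currently held */
	int target[NSTREAMS] = {1, 4};	/* pipes wanted */
	int free_pipes = 8 - 4 - 1;
	int i;

	for (i = 0; i < NSTREAMS; i++)	/* pass 1: release */
		if (target[i] < source[i]) {
			free_pipes += source[i] - target[i];
			source[i] = target[i];
		}
	for (i = 0; i < NSTREAMS; i++)	/* pass 2: acquire */
		if (target[i] > source[i]) {
			free_pipes -= target[i] - source[i];
			source[i] = target[i];
		}
	/* a single combined pass would have tried to grow stream 1
	 * before stream 0 released its three extra pipes */
	printf("free pipes left: %d\n", free_pipes); /* prints 3 */
	return 0;
}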
@@ -1037,6 +1080,12 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
ASSERT(false);
}
}
+
+ if (ctx->config.callbacks.build_test_pattern_params &&
+ pipe->stream &&
+ pipe->prev_odm_pipe == NULL &&
+ pipe->top_pipe == NULL)
+ ctx->config.callbacks.build_test_pattern_params(&state->res_ctx, pipe);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
index 2f91244a7b01..1538b708d8be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
@@ -30,6 +30,8 @@
#include "dml2_dc_types.h"
struct dml2_context;
+struct dml2_dml_to_dc_pipe_mapping;
+struct dml_display_cfg_st;
/*
* dml2_map_dc_pipes - Creates a pipe linkage in dc_state based on current display config.
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
index 1cf8a884c0fb..9dab4e43c511 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
@@ -109,10 +109,21 @@ enum dml2_architecture {
dml2_architecture_20,
};
+struct dml2_pipe_combine_factor {
+ unsigned int source;
+ unsigned int target;
+};
+
+struct dml2_pipe_combine_scratch {
+ struct dml2_pipe_combine_factor odm_factors[MAX_PIPES];
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES][MAX_PIPES];
+};
+
struct dml2_context {
enum dml2_architecture architecture;
struct dml2_configuration_options config;
struct dml2_helper_det_policy_scratch det_helper_scratch;
+ struct dml2_pipe_combine_scratch pipe_combine_scratch;
union {
struct {
struct display_mode_lib_st dml_core_ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 17a58f41fc6a..a41812598ce8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -29,6 +29,7 @@
#include "dml2_translation_helper.h"
#define NUM_DCFCLK_STAS 5
+#define NUM_DCFCLK_STAS_NEW 8
void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
@@ -228,17 +229,13 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
break;
case dml_project_dcn35:
+ case dml_project_dcn351:
out->num_chans = 4;
out->round_trip_ping_latency_dcfclk_cycles = 106;
out->smn_latency_us = 2;
out->dispclk_dppclk_vco_speed_mhz = 3600;
break;
- case dml_project_dcn351:
- out->num_chans = 16;
- out->round_trip_ping_latency_dcfclk_cycles = 1100;
- out->smn_latency_us = 2;
- break;
}
/* ---Overrides if available--- */
if (dml2->config.bbox_overrides.dram_num_chan)
@@ -253,12 +250,21 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
{
struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch;
struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params;
- unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS];
+ unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0};
+ unsigned int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW] = {0};
+ unsigned int dml_project = dml2->v20.dml_core_ctx.project;
+
unsigned int i = 0;
unsigned int transactions_per_mem_clock = 16; // project specific; depends on the memory type used
- p->dcfclk_stas_mhz = dcfclk_stas_mhz;
- p->num_dcfclk_stas = NUM_DCFCLK_STAS;
+ if (dml_project == dml_project_dcn351) {
+ p->dcfclk_stas_mhz = dcfclk_stas_mhz_new;
+ p->num_dcfclk_stas = NUM_DCFCLK_STAS_NEW;
+ } else {
+ p->dcfclk_stas_mhz = dcfclk_stas_mhz;
+ p->num_dcfclk_stas = NUM_DCFCLK_STAS;
+ }
+
p->in_bbox = in_bbox;
p->out_states = out;
p->in_states = &dml2->v20.scratch.create_scratch.in_states;
@@ -436,8 +442,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
dml2_policy_build_synthetic_soc_states(s, p);
- if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
- dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35) {
// Override last out_state with data from last in_state
// This will ensure that out_state contains max fclk
memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
@@ -1056,7 +1061,46 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
plane_index = 0;
}
}
-
+static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out,
+ unsigned int location, const struct dc_stream_state *in)
+{
+ if (in->num_wb_info > 0) {
+ for (int i = 0; i < __DML_NUM_DMB__; i++) {
+ const struct dc_writeback_info *wb_info = &in->writeback_info[i];
+ /* current DML supports only 1 DWB per stream (limitation) */
+ if (wb_info->wb_enabled) {
+ out->WritebackEnable[location] = wb_info->wb_enabled;
+ out->ActiveWritebacksPerSurface[location] = wb_info->dwb_params.cnv_params.src_width;
+ out->WritebackDestinationWidth[location] = wb_info->dwb_params.dest_width;
+ out->WritebackDestinationHeight[location] = wb_info->dwb_params.dest_height;
+
+ out->WritebackSourceWidth[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_width :
+ wb_info->dwb_params.cnv_params.src_width;
+
+ out->WritebackSourceHeight[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_height :
+ wb_info->dwb_params.cnv_params.src_height;
+ /* current design does not have chroma scaling; follow-up needed */
+ out->WritebackHTaps[location] = wb_info->dwb_params.scaler_taps.h_taps > 0 ?
+ wb_info->dwb_params.scaler_taps.h_taps : 1;
+ out->WritebackVTaps[location] = wb_info->dwb_params.scaler_taps.v_taps > 0 ?
+ wb_info->dwb_params.scaler_taps.v_taps : 1;
+
+ out->WritebackHRatio[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_width /
+ (double)wb_info->dwb_params.dest_width :
+ (double)wb_info->dwb_params.cnv_params.src_width /
+ (double)wb_info->dwb_params.dest_width;
+ out->WritebackVRatio[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_height /
+ (double)wb_info->dwb_params.dest_height :
+ (double)wb_info->dwb_params.cnv_params.src_height /
+ (double)wb_info->dwb_params.dest_height;
+ }
+ }
+ }
+}
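Whenever cropping is enabled, the new helper consistently substitutes crop extents for source extents, both for WritebackSource{Width,Height} and for the H/V scaling ratios. A compact sketch of the ratio selection, with all sizes invented:

#include <stdio.h>

int main(void)
{
	unsigned int src_w = 3840, crop_w = 1920, dest_w = 1280;
	int crop_en = 1;
	double hratio = crop_en ? (double)crop_w / dest_w
				: (double)src_w / dest_w;

	printf("WritebackHRatio = %.2f\n", hratio); /* prints 1.50 */
	return 0;
}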
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
{
int i = 0, j = 0, k = 0;
@@ -1101,6 +1145,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context);
+ /* call site for populate_dml_writeback_cfg_from_stream_state */
+ populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback,
+ disp_cfg_stream_location, context->streams[i]);
+
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index f15d1dbad6a9..0f8b3336e26d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -224,7 +224,7 @@ static int find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int
static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane,
unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id)
{
- int i, j;
+ unsigned int i, j;
bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists;
if (!plane_id)
@@ -327,6 +327,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
}
+ if (dml_pipe_idx == 0xFFFFFFFF)
+ continue;
ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[dml_pipe_idx]);
ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_pipe_idx] == context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
@@ -374,10 +376,16 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
+
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dppclk_mhz
* 1000;
context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dispclk_mhz
* 1000;
+
+ if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
+ context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz;
+ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ }
}
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx)
@@ -396,6 +404,71 @@ void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display
watermark->cstate_pstate.cstate_exit_z8_ns = dml_get_wm_z8_stutter(dml_core_ctx) * 1000;
}
+unsigned int dml2_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark)
+{
+ unsigned int time_per_byte = 0;
+ unsigned int total_free_entry = 0xb40;
+ unsigned int buf_lh_capability;
+ unsigned int max_scaled_time;
+
+ if (mode == PACKED_444) /* packed mode 32 bpp */
+ time_per_byte = time_per_pixel/4;
+ else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */
+ time_per_byte = time_per_pixel/8;
+
+ if (time_per_byte == 0)
+ time_per_byte = 1;
+
+ buf_lh_capability = (total_free_entry * time_per_byte * 32) >> 6; /* time_per_byte is in u6.6 */
+ max_scaled_time = buf_lh_capability - urgent_watermark;
+ return max_scaled_time;
+}
+
+void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx)
+{
+ int i, j = 0;
+ struct mcif_arb_params *wb_arb_params = NULL;
+ struct dcn_bw_writeback *bw_writeback = NULL;
+ enum mmhubbub_wbif_mode wbif_mode = PACKED_444_FP16; /* for now */
+
+ if (context->stream_count != 0) {
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->num_wb_info != 0)
+ j++;
+ }
+ }
+ if (j == 0) /* no dwb */
+ return;
+ for (i = 0; i < __DML_NUM_DMB__; i++) {
+ bw_writeback = &context->bw_ctx.bw.dcn.bw_writeback;
+ wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[i];
+
+ for (j = 0; j < 4; j++) {
+ /* current DML only has one set of watermarks; follow-up needed */
+ bw_writeback->mcif_wb_arb[i].cli_watermark[j] =
+ dml_get_wm_writeback_urgent(dml_core_ctx) * 1000;
+ bw_writeback->mcif_wb_arb[i].pstate_watermark[j] =
+ dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000;
+ }
+ if (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk != 0) {
+ /* time_per_pixel should be in u6.6 format */
+ bw_writeback->mcif_wb_arb[i].time_per_pixel =
+ (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
+ }
+ bw_writeback->mcif_wb_arb[i].slice_lines = 32;
+ bw_writeback->mcif_wb_arb[i].arbitration_slice = 2;
+ bw_writeback->mcif_wb_arb[i].max_scaled_time =
+ dml2_calc_max_scaled_time(wb_arb_params->time_per_pixel,
+ wbif_mode, wb_arb_params->cli_watermark[0]);
+ /* not required any more */
+ bw_writeback->mcif_wb_arb[i].dram_speed_change_duration =
+ dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000;
+
+ }
+}
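Both writeback helpers lean on u6.6 fixed point: time_per_pixel is scaled by 64 (the << 6) so fractional time survives integer math, and dml2_calc_max_scaled_time shifts the product back down by 6. A standalone model of the arithmetic; the clock value and the kHz unit are assumptions for illustration:

#include <stdio.h>

/* mirrors dml2_calc_max_scaled_time() on the PACKED_444_FP16 path */
static unsigned int max_scaled_time(unsigned int time_per_pixel,
				    unsigned int urgent_watermark)
{
	unsigned int time_per_byte = time_per_pixel / 8; /* 64 bpp packed */
	unsigned int total_free_entry = 0xb40;

	if (time_per_byte == 0)
		time_per_byte = 1;
	/* time_per_byte is u6.6; >> 6 returns to integer units */
	return ((total_free_entry * time_per_byte * 32) >> 6) - urgent_watermark;
}

int main(void)
{
	unsigned int phy_pix_clk = 594000;			/* assumed kHz */
	unsigned int time_per_pixel = (1000000 << 6) / phy_pix_clk; /* u6.6 */

	printf("time_per_pixel=%u max_scaled_time=%u\n",
	       time_per_pixel, max_scaled_time(time_per_pixel, 4000));
	return 0;
}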
void dml2_initialize_det_scratch(struct dml2_context *in_ctx)
{
int i;
@@ -468,6 +541,9 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc
dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id);
else
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id);
+
+ if (dml_pipe_idx == 0xFFFFFFFF)
+ continue;
total_det_allocated += dml_get_det_buffer_size_kbytes(&in_ctx->v20.dml_core_ctx, dml_pipe_idx);
if (total_det_allocated > max_det_size) {
need_recalculation = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
index 5842d6d3c4b6..04fcfe637119 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
@@ -40,9 +40,14 @@ void dml2_util_copy_dml_output(struct dml_output_cfg_st *dml_output_array, unsig
unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, enum dml_output_encoder_class encoder, bool dsc_enabled);
void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_state *context);
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx);
+void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx);
int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
bool is_dtbclk_required(const struct dc *dc, struct dc_state *context);
bool dml2_is_stereo_timing(const struct dc_stream_state *stream);
+unsigned int dml2_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark);
/*
* dml2_dc_construct_pipes - This function will determine if we need additional pipes based
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 72cca367062e..9412d5384a41 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -570,6 +570,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
struct dml2_dcn_clocks out_clks;
unsigned int result = 0;
bool need_recalculation = false;
+ uint32_t cstate_enter_plus_exit_z8_ns;
if (!context || context->stream_count == 0)
return true;
@@ -639,8 +640,17 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
+ dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
//copy for deciding zstate use
context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;
+
+ cstate_enter_plus_exit_z8_ns = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+
+ if (context->bw_ctx.dml.vba.StutterPeriod < in_dc->debug.minimum_z8_residency_time &&
+ cstate_enter_plus_exit_z8_ns < in_dc->debug.minimum_z8_residency_time * 1000)
+ cstate_enter_plus_exit_z8_ns = in_dc->debug.minimum_z8_residency_time * 1000;
+
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns;
}
return result;
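The added clamp keeps the Z8 enter-plus-exit watermark at or above the debug residency floor when the stutter period is short; the debug knob is in microseconds and the watermark in nanoseconds, hence both * 1000 conversions. A sketch with assumed values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	double stutter_period_us = 800.0;
	uint32_t min_z8_residency_us = 1000;	/* debug knob, assumed */
	uint32_t enter_plus_exit_z8_ns = 350000;

	if (stutter_period_us < min_z8_residency_us &&
	    enter_plus_exit_z8_ns < min_z8_residency_us * 1000)
		enter_plus_exit_z8_ns = min_z8_residency_us * 1000;

	printf("%u ns\n", enter_plus_exit_z8_ns); /* prints 1000000 */
	return 0;
}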
@@ -681,13 +691,13 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d
}
}
-bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate)
+bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate)
{
bool out = false;
- if (!(context->bw_ctx.dml2))
+ if (!dml2)
return false;
- dml2_apply_debug_options(in_dc, context->bw_ctx.dml2);
+ dml2_apply_debug_options(in_dc, dml2);
/* Use dml_validate_only for fast_validate path */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index cc662d682fd4..4a8bd2f4195e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -71,6 +71,7 @@ struct dml2_dcn_clocks {
struct dml2_dc_callbacks {
struct dc *dc;
bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx);
+ void (*build_test_pattern_params)(struct resource_context *res_ctx, struct pipe_ctx *otg_master);
bool (*can_support_mclk_switch_using_fw_based_vblank_stretch)(struct dc *dc, struct dc_state *context);
bool (*acquire_secondary_pipe_for_mpc_odm)(const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm);
bool (*update_pipes_for_stream_with_slice_count)(
@@ -86,8 +87,23 @@ struct dml2_dc_callbacks {
const struct dc_plane_state *plane,
int slice_count);
int (*get_odm_slice_index)(const struct pipe_ctx *opp_head);
+ int (*get_odm_slice_count)(const struct pipe_ctx *opp_head);
int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe);
+ int (*get_mpc_slice_count)(const struct pipe_ctx *dpp_pipe);
struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx);
+ struct pipe_ctx *(*get_otg_master_for_stream)(
+ struct resource_context *res_ctx,
+ const struct dc_stream_state *stream);
+ int (*get_opp_heads_for_otg_master)(const struct pipe_ctx *otg_master,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *opp_heads[MAX_PIPES]);
+ int (*get_dpp_pipes_for_plane)(const struct dc_plane_state *plane,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *dpp_pipes[MAX_PIPES]);
+ struct dc_stream_status *(*get_stream_status)(
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+ struct dc_stream_state *(*get_stream_from_id)(const struct dc_state *state, unsigned int id);
};
struct dml2_dc_svp_callbacks {
@@ -96,10 +112,10 @@ struct dml2_dc_svp_callbacks {
struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *main_stream);
- struct dc_plane_state* (*create_phantom_plane)(struct dc *dc,
+ struct dc_plane_state* (*create_phantom_plane)(const struct dc *dc,
struct dc_state *state,
struct dc_plane_state *main_plane);
- enum dc_status (*add_phantom_stream)(struct dc *dc,
+ enum dc_status (*add_phantom_stream)(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream,
struct dc_stream_state *main_stream);
@@ -108,7 +124,7 @@ struct dml2_dc_svp_callbacks {
struct dc_stream_state *stream,
struct dc_plane_state *plane_state,
struct dc_state *context);
- enum dc_status (*remove_phantom_stream)(struct dc *dc,
+ enum dc_status (*remove_phantom_stream)(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
void (*release_phantom_plane)(const struct dc *dc,
@@ -121,6 +137,15 @@ struct dml2_dc_svp_callbacks {
enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx);
enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream);
struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream);
+ bool (*remove_phantom_streams_and_planes)(
+ const struct dc *dc,
+ struct dc_state *state);
+ void (*release_phantom_streams_and_planes)(
+ const struct dc *dc,
+ struct dc_state *state);
+ unsigned int (*calculate_mall_ways_from_bytes)(
+ const struct dc *dc,
+ unsigned int total_size_in_mall_bytes);
};
struct dml2_clks_table_entry {
@@ -191,6 +216,8 @@ struct dml2_configuration_options {
unsigned int max_segments_per_hubp;
unsigned int det_segment_size;
bool map_dc_pipes_with_callbacks;
+
+ bool use_clock_dc_limits;
};
/*
@@ -244,6 +271,7 @@ void dml2_reinit(const struct dc *in_dc,
*/
bool dml2_validate(const struct dc *in_dc,
struct dc_state *context,
+ struct dml2_context *dml2,
bool fast_validate);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/Makefile b/drivers/gpu/drm/amd/display/dc/dpp/Makefile
new file mode 100644
index 000000000000..99bd36073561
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/Makefile
@@ -0,0 +1,77 @@
+
+# Copyright 2022 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'dpp' sub-component of DAL.
+#
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN
+###############################################################################
+
+DPP_DCN10 = dcn10_dpp.o dcn10_dpp_dscl.o dcn10_dpp_cm.o
+
+AMD_DAL_DPP_DCN10 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn10/,$(DPP_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN10)
+
+###############################################################################
+
+DPP_DCN20 = dcn20_dpp.o dcn20_dpp_cm.o
+
+AMD_DAL_DPP_DCN20 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn20/,$(DPP_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN20)
+
+###############################################################################
+
+DPP_DCN201 = dcn201_dpp.o
+
+AMD_DAL_DPP_DCN201 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn201/,$(DPP_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN201)
+
+###############################################################################
+
+DPP_DCN30 = dcn30_dpp.o dcn30_dpp_cm.o
+
+AMD_DAL_DPP_DCN30 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn30/,$(DPP_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN30)
+
+###############################################################################
+
+DPP_DCN32 = dcn32_dpp.o
+
+AMD_DAL_DPP_DCN32 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn32/,$(DPP_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN32)
+
+###############################################################################
+
+DPP_DCN35 = dcn35_dpp.o
+
+AMD_DAL_DPP_DCN35 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn35/,$(DPP_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN35)
+
+###############################################################################
+
+endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt
new file mode 100644
index 000000000000..1318c6fba3e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt
@@ -0,0 +1,6 @@
+dal3_subdirectory_sources(
+ dcn10_dpp.c
+ dcn10_dpp_cm.c
+ dcn10_dpp_dscl.c
+ dcn10_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 4e391fd1d71c..e1da48b05d00 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index a039eedc7c24..c48139bed11f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1090,7 +1090,8 @@
type DPP_CLOCK_ENABLE; \
type CM_HDR_MULT_COEF; \
type CUR0_FP_BIAS; \
- type CUR0_FP_SCALE;
+ type CUR0_FP_SCALE; \
+ type DISPCLK_R_GATE_DISABLE;
struct dcn_dpp_shift {
TF_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
index 2f994a3a0b9c..006e23842016 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
@@ -28,9 +28,9 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "basics/conversion.h"
-#include "dcn10_cm_common.h"
+#include "dcn10/dcn10_cm_common.h"
#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
@@ -234,7 +234,7 @@ void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- uint16_t arr_reg_val[12];
+ uint16_t arr_reg_val[12] = {0};
enum gamut_remap_select select;
read_gamut_remap(dpp, arr_reg_val, &select);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c
index 5ca9ab8a76e8..808bca9fb804 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
#include "basics/conversion.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt
new file mode 100644
index 000000000000..9c2d7096348e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt
@@ -0,0 +1,5 @@
+dal3_subdirectory_sources(
+ dcn20_dpp.c
+ dcn20_dpp_cm.c
+ dcn20_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
index 1516c0a48726..56ebd7164dd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn20_dpp.h"
+#include "dcn20/dcn20_dpp.h"
#include "basics/conversion.h"
#define NUM_PHASES 64
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
index 672cde46c4b9..49cb25c9cb36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
@@ -736,7 +736,7 @@ bool dpp20_program_shaper(
bool dpp20_program_3dlut(
struct dpp *dpp_base,
- struct tetrahedral_params *params);
+ const struct tetrahedral_params *params);
void dpp2_cnv_set_alpha_keyer(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
index 58dc69926e8a..31613372e214 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn20_dpp.h"
+#include "dcn20/dcn20_dpp.h"
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
@@ -274,7 +274,7 @@ void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
- uint16_t arr_reg_val[12];
+ uint16_t arr_reg_val[12] = {0};
enum dcn20_gamut_remap_select select;
read_gamut_remap(dpp, arr_reg_val, &select);
@@ -1114,15 +1114,15 @@ static void dpp20_select_3dlut_ram_mask(
bool dpp20_program_3dlut(
struct dpp *dpp_base,
- struct tetrahedral_params *params)
+ const struct tetrahedral_params *params)
{
enum dc_lut_mode mode;
bool is_17x17x17;
bool is_12bits_color_channel;
- struct dc_rgb *lut0;
- struct dc_rgb *lut1;
- struct dc_rgb *lut2;
- struct dc_rgb *lut3;
+ const struct dc_rgb *lut0;
+ const struct dc_rgb *lut1;
+ const struct dc_rgb *lut2;
+ const struct dc_rgb *lut3;
int lut_size0;
int lut_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt
new file mode 100644
index 000000000000..7711cd3c47a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn201_dpp.c
+ dcn201_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c
index f809a7d21033..345202fee40f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn201_dpp.h"
+#include "dcn201/dcn201_dpp.h"
#include "basics/conversion.h"
#define REG(reg)\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h
index cbd5b47b4acf..cbd5b47b4acf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt
new file mode 100644
index 000000000000..0faee2a1e32b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt
@@ -0,0 +1,5 @@
+dal3_subdirectory_sources(
+ dcn30_dpp.c
+ dcn30_dpp_cm.c
+ dcn30_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index a3a769aad042..f8c0cee34080 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -26,9 +26,9 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn30_dpp.h"
+#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#define REG(reg)\
dpp->tf_regs->reg
@@ -293,9 +293,11 @@ void dpp3_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
pixel_format = 113;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
@@ -319,9 +321,11 @@ void dpp3_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
pixel_format = 118;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
pixel_format = 119;
+ alpha_en = 0;
break;
default:
break;
@@ -1384,15 +1388,15 @@ static void dpp3_select_3dlut_ram_mask(
}
static bool dpp3_program_3dlut(struct dpp *dpp_base,
- struct tetrahedral_params *params)
+ const struct tetrahedral_params *params)
{
enum dc_lut_mode mode;
bool is_17x17x17;
bool is_12bits_color_channel;
- struct dc_rgb *lut0;
- struct dc_rgb *lut1;
- struct dc_rgb *lut2;
- struct dc_rgb *lut3;
+ const struct dc_rgb *lut0;
+ const struct dc_rgb *lut1;
+ const struct dc_rgb *lut2;
+ const struct dc_rgb *lut3;
int lut_size0;
int lut_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index 2ac8045a87a1..269f437c1633 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -132,6 +132,8 @@
SRI(CM_POST_CSC_B_C33_C34, CM, id), \
SRI(CM_MEM_PWR_CTRL, CM, id), \
SRI(CM_CONTROL, CM, id), \
+ SRI(CM_TEST_DEBUG_INDEX, CM, id), \
+ SRI(CM_TEST_DEBUG_DATA, CM, id), \
SRI(FORMAT_CONTROL, CNVC_CFG, id), \
SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
@@ -294,6 +296,7 @@
TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \
TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \
+ TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
@@ -426,6 +429,7 @@
type CM_GAMCOR_LUT_DATA; \
type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \
type CM_GAMCOR_LUT_READ_COLOR_SEL; \
+ type CM_GAMCOR_LUT_READ_DBG; \
type CM_GAMCOR_LUT_HOST_SEL; \
type CM_GAMCOR_LUT_CONFIG_MODE; \
type CM_GAMCOR_LUT_STATUS; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
index 2f5b3fbd3507..82eca0e7b7d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
@@ -26,9 +26,9 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn30_dpp.h"
+#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#define REG(reg)\
dpp->tf_regs->reg
@@ -445,7 +445,7 @@ void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
- uint16_t arr_reg_val[12];
+ uint16_t arr_reg_val[12] = {0};
int select;
read_gamut_remap(dpp, arr_reg_val, &select);
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt
new file mode 100644
index 000000000000..7743edc4599f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn32_dpp.c
+ dcn32_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
index 681e75c6dbaf..41679997b44d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
-#include "dcn32_dpp.h"
+#include "dcn32/dcn32_dpp.h"
#include "basics/conversion.h"
#include "dcn30/dcn30_cm_common.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
index 572958d287eb..572958d287eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt
new file mode 100644
index 000000000000..91df5db26435
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn35_dpp.c
+ dcn35_dpp.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
new file mode 100644
index 000000000000..e16274fee31d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "dcn35/dcn35_dpp.h"
+#include "reg_helper.h"
+
+#define REG(reg) dpp->tf_regs->reg
+
+#define CTX dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \
+ ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name
+
+void dpp35_dppclk_control(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+ bool enable)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ if (enable) {
+ if (dpp->tf_mask->DPPCLK_RATE_CONTROL)
+ REG_UPDATE_2(DPP_CONTROL,
+ DPPCLK_RATE_CONTROL, dppclk_div,
+ DPP_CLOCK_ENABLE, 1);
+ else
+ REG_UPDATE_2(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 1,
+ DISPCLK_R_GATE_DISABLE, 1);
+ } else
+ REG_UPDATE_2(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 0,
+ DISPCLK_R_GATE_DISABLE, 0);
+}
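dpp35_dppclk_control pairs DPP_CLOCK_ENABLE with the new DISPCLK_R_GATE_DISABLE field; REG_UPDATE_2 is DC's read-modify-write of two fields in one register. A rough userspace model of what the macro expands to, with the shifts and masks invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define DPP_CLOCK_ENABLE_SHIFT		0
#define DPP_CLOCK_ENABLE_MASK		0x1u
#define DISPCLK_R_GATE_DISABLE_SHIFT	4
#define DISPCLK_R_GATE_DISABLE_MASK	0x10u

static uint32_t dpp_control;	/* stands in for the MMIO register */

static void update2(uint32_t en, uint32_t gate_dis)
{
	uint32_t v = dpp_control;

	v = (v & ~DPP_CLOCK_ENABLE_MASK) | (en << DPP_CLOCK_ENABLE_SHIFT);
	v = (v & ~DISPCLK_R_GATE_DISABLE_MASK) |
	    (gate_dis << DISPCLK_R_GATE_DISABLE_SHIFT);
	dpp_control = v;
}

int main(void)
{
	update2(1, 1);	/* enable clock, disable root gate */
	printf("DPP_CONTROL = 0x%08x\n", dpp_control); /* 0x00000011 */
	return 0;
}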
+
+static struct dpp_funcs dcn35_dpp_funcs = {
+ .dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
+ .dpp_read_state = dpp30_read_state,
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap,
+ .dpp_set_csc_adjustment = NULL,
+ .dpp_set_csc_default = NULL,
+ .dpp_program_regamma_pwl = NULL,
+ .dpp_set_pre_degam = dpp3_set_pre_degam,
+ .dpp_program_input_lut = NULL,
+ .dpp_full_bypass = dpp1_full_bypass,
+ .dpp_setup = dpp3_cnv_setup,
+ .dpp_program_degamma_pwl = NULL,
+ .dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
+ .dpp_program_cm_bias = dpp3_program_cm_bias,
+
+ .dpp_program_blnd_lut = NULL, // BLNDGAM is removed completely in DCN3.2 DPP
+ .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
+ .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
+
+ .dpp_program_bias_and_scale = NULL,
+ .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
+ .set_cursor_attributes = dpp3_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
+ .dpp_dppclk_control = dpp35_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+};
+
+
+bool dpp35_construct(
+ struct dcn3_dpp *dpp, struct dc_context *ctx,
+ uint32_t inst, const struct dcn3_dpp_registers *tf_regs,
+ const struct dcn35_dpp_shift *tf_shift,
+ const struct dcn35_dpp_mask *tf_mask)
+{
+ bool ret = dpp32_construct(dpp, ctx, inst, tf_regs,
+ (const struct dcn3_dpp_shift *)(tf_shift),
+ (const struct dcn3_dpp_mask *)(tf_mask));
+
+ dpp->base.funcs = &dcn35_dpp_funcs;
+ return ret;
+}
+
+void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable)
+{
+ REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
index 09b84307cd9e..135872d88219 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
@@ -31,7 +31,8 @@
#define DPP_REG_LIST_SH_MASK_DCN35(mask_sh) \
DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \
- TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh)
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \
+ TF_SF(DPP_TOP0_DPP_CONTROL, DISPCLK_R_GATE_DISABLE, mask_sh)
#define DPP_REG_FIELD_LIST_DCN35(type) \
struct { \
@@ -47,6 +49,11 @@ struct dcn35_dpp_mask {
DPP_REG_FIELD_LIST_DCN35(uint32_t);
};
+void dpp35_dppclk_control(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+ bool enable);
+
bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx,
uint32_t inst, const struct dcn3_dpp_registers *tf_regs,
const struct dcn35_dpp_shift *tf_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index ac41f9c0a283..150ef23440a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -458,7 +458,7 @@ bool dc_dsc_compute_bandwidth_range(
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
- struct dc_dsc_config config;
+ struct dc_dsc_config config = {0};
struct dc_dsc_config_options options = {0};
options.dsc_min_slice_height_override = dsc_min_slice_height_override;
@@ -868,9 +868,9 @@ static bool setup_dsc_config(
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
- int max_slices_h;
- int min_slices_h;
- int num_slices_h;
+ int max_slices_h = 0;
+ int min_slices_h = 0;
+ int num_slices_h = 0;
int pic_width;
int slice_width;
int target_bpp;
@@ -1055,7 +1055,12 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- dsc_cfg->num_slices_v = pic_height/slice_height;
+ if (slice_height > 0) {
+ dsc_cfg->num_slices_v = pic_height / slice_height;
+ } else {
+ is_dsc_possible = false;
+ goto done;
+ }
if (target_bandwidth_kbps > 0) {
is_dsc_possible = decide_dsc_target_bpp_x16(
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 36d6c1646a51..59864130cf83 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -101,7 +101,6 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
{
int ret;
struct drm_dsc_config dsc_cfg;
- unsigned long long tmp;
dsc_params->pps = *pps;
dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset);
@@ -112,9 +111,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;
ret = drm_dsc_compute_rc_parameters(&dsc_cfg);
- tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1);
- do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP
- dsc_params->bytes_per_pixel = (uint32_t)tmp;
+ dsc_params->bytes_per_pixel =
+ (uint32_t)(div_u64(((uint64_t)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1)),
+ (uint32_t)dsc_cfg.slice_width)); /* Round-up */
copy_pps_fields(&dsc_params->pps, &dsc_cfg);
dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits;
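
The rc_calc_dpi.c hunk keeps the same round-up fixed-point division but folds it into div_u64(), the kernel helper for a 64-bit dividend with a 32-bit divisor. The rounding is the usual add-(divisor-1)-then-divide trick; a self-contained userspace check (the sample values are illustrative, not from the patch):

#include <assert.h>
#include <stdint.h>

/* Round-up division: (a + b - 1) / b, done in 64 bits so the
 * 0x10000000 fixed-point scale factor cannot overflow 32 bits. */
static uint32_t div_round_up_u64(uint64_t a, uint32_t b)
{
    return (uint32_t)((a + b - 1) / b);
}

int main(void)
{
    /* e.g. chunk size 540 bytes, slice width 960 px, scale 2^28 */
    uint64_t scaled = (uint64_t)540 * 0x10000000;

    assert(div_round_up_u64(scaled, 960) == 0x9000000); /* 0.5625 * 2^28 */
    return 0;
}
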
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
index d734e3a134d1..2840ed5c57d8 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
@@ -95,10 +95,6 @@ static bool offset_to_id(
return true;
default:
ASSERT_CRITICAL(false);
-#ifdef PALLADIUM_SUPPORTED
- *en = GPIO_DDC_LINE_DDC1;
- return true;
-#endif
return false;
}
break;
@@ -184,11 +180,6 @@ static bool offset_to_id(
/* UNEXPECTED */
default:
/* case REG(DC_GPIO_SYNCA_A): does not exist */
-#ifdef PALLADIUM_SUPPORTED
- *id = GPIO_ID_HPD;
- *en = GPIO_DDC_LINE_DDC1;
- return true;
-#endif
ASSERT_CRITICAL(false);
return false;
}
@@ -308,10 +299,6 @@ static bool id_to_offset(
break;
default:
ASSERT_CRITICAL(false);
-#ifdef PALLADIUM_SUPPORTED
- info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
- result = true;
-#endif
result = false;
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index 3ede6e02c3a7..663c17f52779 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -128,7 +128,7 @@ struct gpio *dal_gpio_service_create_irq(
uint32_t offset,
uint32_t mask)
{
- enum gpio_id id;
+ enum gpio_id id = 0;
uint32_t en;
if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
@@ -144,7 +144,7 @@ struct gpio *dal_gpio_service_create_generic_mux(
uint32_t offset,
uint32_t mask)
{
- enum gpio_id id;
+ enum gpio_id id = 0;
uint32_t en;
struct gpio *generic;
@@ -178,7 +178,7 @@ struct gpio_pin_info dal_gpio_get_generic_pin_info(
enum gpio_id id,
uint32_t en)
{
- struct gpio_pin_info pin;
+ struct gpio_pin_info pin = {0};
if (service->translate.funcs->id_to_offset) {
service->translate.funcs->id_to_offset(id, en, &pin);
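
This gpio_service.c hunk belongs to a recurring fix in the series (also in dc_dsc.c, dcn21_hwseq.c, dcn30_hwseq.c and others): locals that are filled in through out-parameters are now zero-initialized, so a failed or absent callback can no longer leave them holding stack garbage. The hazard being closed, condensed; translate_offset_to_id() below is a hypothetical stand-in:

#include <stdbool.h>
#include <stdint.h>

enum gpio_id { GPIO_ID_UNKNOWN = 0, GPIO_ID_DDC, GPIO_ID_HPD };

/* May return false without ever writing *id. */
static bool translate_offset_to_id(uint32_t offset, enum gpio_id *id)
{
    if (offset > 0x100)
        return false;            /* *id left untouched on this path */
    *id = GPIO_ID_DDC;
    return true;
}

enum gpio_id lookup(uint32_t offset)
{
    enum gpio_id id = GPIO_ID_UNKNOWN;   /* was: uninitialized */

    if (!translate_offset_to_id(offset, &id))
        return GPIO_ID_UNKNOWN;          /* id is defined either way */
    return id;
}
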
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index 9e8e9de51a92..cf8aa23b4415 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -180,7 +180,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35)
###############################################################################
-HWSS_DCN351 = dcn351_init.o
+HWSS_DCN351 = dcn351_hwseq.o dcn351_init.o
AMD_DAL_HWSS_DCN351 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn351/,$(HWSS_DCN351))
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 9d5df4c0da59..0d3ea291eeee 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -249,7 +249,7 @@ static bool dce110_enable_display_power_gating(
return false;
}
-static void build_prescale_params(struct ipp_prescale_params *prescale_params,
+static void dce110_prescale_params(struct ipp_prescale_params *prescale_params,
const struct dc_plane_state *plane_state)
{
prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
@@ -289,16 +289,14 @@ dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
if (ipp == NULL)
return false;
- if (plane_state->in_transfer_func)
- tf = plane_state->in_transfer_func;
+ tf = &plane_state->in_transfer_func;
- build_prescale_params(&prescale_params, plane_state);
+ dce110_prescale_params(&prescale_params, plane_state);
ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
- if (plane_state->gamma_correction &&
- !plane_state->gamma_correction->is_identity &&
+ if (!plane_state->gamma_correction.is_identity &&
dce_use_lut(plane_state->format))
- ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
+ ipp->funcs->ipp_program_input_lut(ipp, &plane_state->gamma_correction);
if (tf == NULL) {
/* Default case if no input transfer function specified */
@@ -614,11 +612,10 @@ dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
xfm->funcs->opp_power_on_regamma_lut(xfm, true);
xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
- if (stream->out_transfer_func &&
- stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
- stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) {
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB) {
xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB);
- } else if (dce110_translate_regamma_to_hw_format(stream->out_transfer_func,
+ } else if (dce110_translate_regamma_to_hw_format(&stream->out_transfer_func,
&xfm->regamma_params)) {
xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params);
xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER);
@@ -1185,22 +1182,13 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
-
- if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- /* TODO: This looks like a bug to me as we are disabling HPO IO when
- * we are just disabling a single HPO stream. Shouldn't we disable HPO
- * HW control only when HPOs for all streams are disabled?
- */
- if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control)
- pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control(
- pipe_ctx->stream->ctx->dc->hwseq, false);
- }
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1549,7 +1537,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
}
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output;
+ struct audio_output audio_output = {0};
build_audio_output(context, pipe_ctx, &audio_output);
@@ -2200,7 +2188,7 @@ static void dce110_setup_audio_dto(
struct dc *dc,
struct dc_state *context)
{
- int i;
+ unsigned int i;
/* program audio wall clock. use HDMI as clock source if HDMI
* audio active. Otherwise, use DP as clock source
@@ -2272,7 +2260,7 @@ static void dce110_setup_audio_dto(
continue;
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output;
+ struct audio_output audio_output = {0};
build_audio_output(context, pipe_ctx, &audio_output);
@@ -2287,6 +2275,19 @@ static void dce110_setup_audio_dto(
}
}
+static bool dce110_is_hpo_enabled(struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
+ if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context)
@@ -2295,6 +2296,8 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc_bios *dcb = dc->ctx->dc_bios;
enum dc_status status;
int i;
+ bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state);
+ bool is_hpo_enabled = dce110_is_hpo_enabled(context);
/* reset syncd pipes from disabled pipes */
if (dc->config.use_pipe_ctx_sync_logic)
@@ -2337,6 +2340,10 @@ enum dc_status dce110_apply_ctx_to_hw(
dce110_setup_audio_dto(dc, context);
+ if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled) {
+ dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled);
+ }
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx_old =
&dc->current_state->res_ctx.pipe_ctx[i];
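
The dce110 change above resolves the removed TODO: instead of disabling HPO HW control whenever a single 128b/132b stream goes away, apply_ctx_to_hw() now compares the old and new contexts and toggles the control only on a transition. The edge-triggered shape of that fix, in miniature (the state struct and setup_hpo() are simplified stand-ins):

#include <stdbool.h>

struct state { bool hpo_enc_acquired[4]; };

static bool any_hpo_enabled(const struct state *s)
{
    for (int i = 0; i < 4; i++)
        if (s->hpo_enc_acquired[i])
            return true;
    return false;
}

static void setup_hpo(bool on) { (void)on; /* program HW once */ }

void apply_ctx(const struct state *cur, const struct state *new_ctx)
{
    bool was = any_hpo_enabled(cur);
    bool now = any_hpo_enabled(new_ctx);

    if (was != now)          /* act on the edge, not per stream */
        setup_hpo(now);
}
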
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 314798400b16..0c4aef8ffe2c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -367,7 +367,7 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.dpp.ocsc);
DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
@@ -1366,6 +1366,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
bool can_apply_seamless_boot = false;
+ bool tg_enabled[MAX_PIPES] = {false};
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization) {
@@ -1447,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
// requesting data while in PSR.
tg->funcs->tg_init(tg);
hubp->power_gated = true;
+ tg_enabled[i] = true;
continue;
}
@@ -1488,6 +1490,20 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
tg->funcs->tg_init(tg);
}
+ /* Clean up MPC tree */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (tg_enabled[i]) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
+ int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
+
+ if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
+ dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ }
+ }
+ }
+ }
+
/* Power gate DSCs */
if (hws->funcs.dsc_pg_control != NULL) {
uint32_t num_opps = 0;
@@ -1813,14 +1829,12 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
if (dpp_base == NULL)
return false;
- if (plane_state->in_transfer_func)
- tf = plane_state->in_transfer_func;
+ tf = &plane_state->in_transfer_func;
- if (plane_state->gamma_correction &&
- !dpp_base->ctx->dc->debug.always_use_regamma
- && !plane_state->gamma_correction->is_identity
+ if (!dpp_base->ctx->dc->debug.always_use_regamma
+ && !plane_state->gamma_correction.is_identity
&& dce_use_lut(plane_state->format))
- dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
+ dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction);
if (tf == NULL)
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
@@ -1861,7 +1875,7 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
#define MAX_NUM_HW_POINTS 0x200
static void log_tf(struct dc_context *ctx,
- struct dc_transfer_func *tf, uint32_t hw_points_num)
+ const struct dc_transfer_func *tf, uint32_t hw_points_num)
{
// DC_LOG_GAMMA is default logging of all hw points
// DC_LOG_ALL_GAMMA logs all points, not only hw points
@@ -1898,16 +1912,15 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
- if (stream->out_transfer_func &&
- stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
- stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
* update.
*/
else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&dpp->regamma_params, false)) {
dpp->funcs->dpp_program_regamma_pwl(
dpp,
@@ -1915,10 +1928,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
} else
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
- if (stream->ctx &&
- stream->out_transfer_func) {
+ if (stream->ctx) {
log_tf(stream->ctx,
- stream->out_transfer_func,
+ &stream->out_transfer_func,
dpp->regamma_params.hw_points_num);
}
@@ -2173,7 +2185,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
struct dc_crtc_timing *hw_crtc_timing;
uint64_t phase[MAX_PIPES];
uint64_t modulo[MAX_PIPES];
- unsigned int pclk;
+ unsigned int pclk = 0;
uint32_t embedded_pix_clk_100hz;
uint16_t embedded_h_total;
@@ -2264,7 +2276,7 @@ void dcn10_enable_vblanks_synchronization(
struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
- int i, width, height, master;
+ int i, width = 0, height = 0, master;
DC_LOGGER_INIT(dc_ctx->logger);
@@ -2330,7 +2342,7 @@ void dcn10_enable_timing_synchronization(
struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
- int i, width, height;
+ int i, width = 0, height = 0;
DC_LOGGER_INIT(dc_ctx->logger);
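
The dcn10_init_pipes() hunks above (mirrored in dcn35_init_pipes() later in this patch) record which timing generators stayed enabled across seamless boot and then prune any inherited OPP list whose bottom MPCC belongs to a pipe that was reset, so a tree carried over from boot cannot reference a dead blend. The pruning rule as a sketch; mpc_node is a simplified stand-in for the OPP tree params:

#include <stdbool.h>
#include <stddef.h>

#define N_PIPES 6

struct mpc_node { int mpcc_id; struct mpc_node *mpcc_bot; };

/* Detach an inherited chain whose bottom layer maps to a disabled pipe. */
void prune_mpc_trees(struct mpc_node *opp_list[N_PIPES],
                     const bool tg_enabled[N_PIPES])
{
    for (int i = 0; i < N_PIPES; i++) {
        if (!tg_enabled[i] || !opp_list[i] || !opp_list[i]->mpcc_bot)
            continue;

        int bot = opp_list[i]->mpcc_bot->mpcc_id;

        if (bot >= 0 && bot < N_PIPES && !tg_enabled[bot])
            opp_list[i] = NULL;      /* orphaned chain: drop it */
    }
}
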
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 8b3536c380b8..7d833fa6dd77 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -155,7 +155,7 @@ void dcn20_log_color_state(struct dc *dc,
DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE"
" OGAM mode\n");
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
@@ -403,7 +403,7 @@ void dcn20_init_blank(
struct output_pixel_processor *opp = NULL;
struct output_pixel_processor *bottom_opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
+ uint32_t otg_active_width = 0, otg_active_height = 0;
/* program opp dpg blank color */
color_space = COLOR_SPACE_SRGB;
@@ -873,6 +873,22 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct dtbclk_dto_params dto_params = {0};
+
+ if (dccg->funcs->set_dtbclk_p_src)
+ dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst);
+
+ dto_params.otg_inst = tg->inst;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
+
if (dc_is_hdmi_tmds_signal(stream->signal)) {
stream->link->phy_state.symclk_ref_cnts.otg = 1;
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
@@ -959,22 +975,6 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
- if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- struct dccg *dccg = dc->res_pool->dccg;
- struct timing_generator *tg = pipe_ctx->stream_res.tg;
- struct dtbclk_dto_params dto_params = {0};
-
- if (dccg->funcs->set_dtbclk_p_src)
- dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst);
-
- dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
- dto_params.timing = &pipe_ctx->stream->timing;
- dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- }
-
return DC_OK;
}
@@ -1011,7 +1011,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
/*
* program OGAM only for the top pipe
* if there is a pipe split then fix diagnostic is required:
@@ -1022,19 +1022,19 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
if (mpc->funcs->power_on_mpc_mem_pwr)
mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
if (pipe_ctx->top_pipe == NULL
- && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
- if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
- params = &stream->out_transfer_func->pwl;
- else if (pipe_ctx->stream->out_transfer_func->type ==
+ && mpc->funcs->set_output_gamma) {
+ if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func.pwl;
+ else if (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm_helper_translate_curve_to_hw_format(dc->ctx,
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/*
* there is no ROM
*/
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
/*
@@ -1050,17 +1050,15 @@ bool dcn20_set_blend_lut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
- struct pwl_params *blend_lut = NULL;
-
- if (plane_state->blend_tf) {
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- blend_lut = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- blend_lut = &dpp_base->regamma_params;
- }
+ const struct pwl_params *blend_lut = NULL;
+
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
}
result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
@@ -1072,24 +1070,21 @@ bool dcn20_set_shaper_3dlut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
- struct pwl_params *shaper_lut = NULL;
-
- if (plane_state->in_shaper_func) {
- if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
- shaper_lut = &plane_state->in_shaper_func->pwl;
- else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- shaper_lut = &dpp_base->shaper_params;
- }
+ const struct pwl_params *shaper_lut = NULL;
+
+ if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
+ shaper_lut = &plane_state->in_shaper_func.pwl;
+ else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ shaper_lut = &dpp_base->shaper_params;
}
result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
- if (plane_state->lut3d_func &&
- plane_state->lut3d_func->state.bits.initialized == 1)
+ if (plane_state->lut3d_func.state.bits.initialized == 1)
result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
- &plane_state->lut3d_func->lut_3d);
+ &plane_state->lut3d_func.lut_3d);
else
result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
@@ -1112,15 +1107,7 @@ bool dcn20_set_input_transfer_func(struct dc *dc,
hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
hws->funcs.set_blend_lut(pipe_ctx, plane_state);
- if (plane_state->in_transfer_func)
- tf = plane_state->in_transfer_func;
-
-
- if (tf == NULL) {
- dpp_base->funcs->dpp_set_degamma(dpp_base,
- IPP_DEGAMMA_MODE_BYPASS);
- return true;
- }
+ tf = &plane_state->in_transfer_func;
if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS)
use_degamma_ram = true;
@@ -1917,9 +1904,11 @@ static void dcn20_program_pipe(
dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
}
- if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size)
- dc->res_pool->hubbub->funcs->program_det_size(
- dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size)
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ }
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
@@ -2080,9 +2069,11 @@ void dcn20_program_front_end_for_ctx(
* turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
* DET allocation.
*/
- if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM)))
- hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
+ (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) {
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ }
hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -2893,11 +2884,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- if (dc->hwseq->funcs.setup_hpo_hw_control)
- dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
- }
-
- if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
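
The recurring theme in the dce110/dcn10/dcn20 hunks above (and the dcn30/dcn32 ones below) is that transfer functions (in_transfer_func, out_transfer_func, blend_tf, in_shaper_func, lut3d_func) changed from pointers to members embedded in the stream/plane state, so every NULL guard collapses and callers take an address only where a pointer is still required. In miniature (the types are illustrative, not the DC structs):

struct tf { int type; };

/* Before: optional via pointer, guarded at every use. */
struct stream_old { struct tf *out_tf; };

/* After: always present, zero-initialized with its parent. */
struct stream_new { struct tf out_tf; };

int tf_type_old(const struct stream_old *s)
{
    return s->out_tf ? s->out_tf->type : 0;  /* NULL check required */
}

int tf_type_new(const struct stream_new *s)
{
    return s->out_tf.type;                   /* no guard, no stray deref */
}
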
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index 884e3e323338..ef6488165b8f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
@@ -67,6 +67,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dce110_set_avmute,
.log_hw_state = dcn10_log_hw_state,
+ .log_color_state = dcn20_log_color_state,
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
index d5769f38874f..6be846635a79 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
@@ -167,7 +167,7 @@ void dcn201_init_blank(
struct tg_color black_color = {0};
struct output_pixel_processor *opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
+ uint32_t otg_active_width = 0, otg_active_height = 0;
/* program opp dpg blank color */
color_space = COLOR_SPACE_SRGB;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 7252f5f781f0..804be977ea47 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -66,7 +66,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c
int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
- struct dcn_hubbub_phys_addr_config config;
+ struct dcn_hubbub_phys_addr_config config = {0};
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 8bc3d01537bb..ed9141a67db3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -166,7 +166,7 @@ void dcn30_log_color_state(struct dc *dc,
"C21 C22 C23 C24 "
"C31 C32 C33 C34 \n");
- for (i = 0; i < pool->pipe_count; i++) {
+ for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
@@ -223,16 +223,14 @@ bool dcn30_set_blend_lut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
bool result = true;
- struct pwl_params *blend_lut = NULL;
-
- if (plane_state->blend_tf) {
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- blend_lut = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm3_helper_translate_curve_to_hw_format(
- plane_state->blend_tf, &dpp_base->regamma_params, false);
- blend_lut = &dpp_base->regamma_params;
- }
+ const struct pwl_params *blend_lut = NULL;
+
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm3_helper_translate_curve_to_hw_format(
+ &plane_state->blend_tf, &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
}
result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
@@ -300,27 +298,24 @@ bool dcn30_set_input_transfer_func(struct dc *dc,
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
enum dc_transfer_func_predefined tf;
bool result = true;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
if (dpp_base == NULL || plane_state == NULL)
return false;
tf = TRANSFER_FUNCTION_UNITY;
- if (plane_state->in_transfer_func &&
- plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
- tf = plane_state->in_transfer_func->tf;
+ if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED)
+ tf = plane_state->in_transfer_func.tf;
dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
- if (plane_state->in_transfer_func) {
- if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
- params = &plane_state->in_transfer_func->pwl;
- else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
- cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
- &dpp_base->degamma_params, false))
- params = &dpp_base->degamma_params;
- }
+ if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
+ params = &plane_state->in_transfer_func.pwl;
+ else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
+ &dpp_base->degamma_params, false))
+ params = &dpp_base->degamma_params;
result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
@@ -378,24 +373,24 @@ bool dcn30_set_output_transfer_func(struct dc *dc,
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/
if (pipe_ctx->top_pipe == NULL) {
/*program rmu shaper and 3dlut in MPC*/
ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream);
- if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
- if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
- params = &stream->out_transfer_func->pwl;
- else if (pipe_ctx->stream->out_transfer_func->type ==
+ if (ret == false && mpc->funcs->set_output_gamma) {
+ if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func.pwl;
+ else if (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm3_helper_translate_curve_to_hw_format(
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/* there are no ROM LUTs in OUTGAM */
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
}
@@ -804,7 +799,7 @@ void dcn30_init_hw(struct dc *dc)
// Get DMCUB capabilities
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
}
void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@ -818,7 +813,7 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
enable);
/* Wait for two frames to make sure AV mute is sent out */
- if (enable) {
+ if (enable && pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
@@ -890,7 +885,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
uint32_t tmr_delay = 0, tmr_scale = 0;
- struct dc_cursor_attributes cursor_attr;
+ struct dc_cursor_attributes cursor_attr = {0};
bool cursor_cache_enable = false;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
@@ -946,7 +941,8 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 &&
plane->address.page_table_base.quad_part == 0 &&
dc->hwss.does_plane_fit_in_mall &&
- dc->hwss.does_plane_fit_in_mall(dc, plane,
+ dc->hwss.does_plane_fit_in_mall(dc, plane->plane_size.surface_pitch,
+ plane->plane_size.surface_size.height, plane->format,
cursor_cache_enable ? &cursor_attr : NULL)) {
unsigned int v_total = stream->adjust.v_total_max ?
stream->adjust.v_total_max : stream->timing.v_total;
@@ -1076,11 +1072,15 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
return true;
}
-bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr)
+bool dcn30_does_plane_fit_in_mall(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
+ struct dc_cursor_attributes *cursor_attr)
{
// add meta size?
- unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height *
- (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ unsigned int surface_size = pitch * height *
+ (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
unsigned int mall_size = dc->caps.mall_size_total;
unsigned int cursor_size = 0;
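
dcn30_does_plane_fit_in_mall() now takes the three scalars it actually reads (pitch, height, pixel format) instead of the whole dc_plane_state, which lets callers pass values they compute themselves. The unchanged size math, as a checkable sketch:

#include <assert.h>

enum fmt { FMT_ARGB8888 = 0, FMT_ARGB16161616 = 1 };

/* Surface bytes in MALL: pitch * height * bytes-per-pixel (4 or 8). */
static unsigned int mall_surface_bytes(unsigned int pitch,
                                       unsigned int height, enum fmt f)
{
    return pitch * height * (f >= FMT_ARGB16161616 ? 8 : 4);
}

int main(void)
{
    /* 1920x1080 ARGB8888 -> ~7.9 MiB */
    assert(mall_surface_bytes(1920, 1080, FMT_ARGB8888) == 8294400);
    return 0;
}
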
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 638f018a3cb5..76b16839486a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -71,7 +71,10 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx);
void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
-bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane,
+bool dcn30_does_plane_fit_in_mall(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr);
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index a760f0c6fe98..1c8abb417b6e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -273,7 +273,7 @@ void dcn31_init_hw(struct dc *dc)
// Get DMCUB capabilities
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
}
void dcn31_dsc_pg_control(
@@ -479,7 +479,7 @@ void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool p
int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
- struct dcn_hubbub_phys_addr_config config;
+ struct dcn_hubbub_phys_addr_config config = {0};
config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 3a9cc8ac0c07..0d8a05cf8b1a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -69,29 +69,6 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -105,7 +82,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -183,10 +160,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -199,20 +172,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index c0b526cf1786..b8e884368dc6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -239,8 +239,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
// Convert number of cache lines required to number of ways
if (dc->debug.force_mall_ss_num_ways > 0) {
num_ways = dc->debug.force_mall_ss_num_ways;
+ } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) {
+ num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
} else {
- num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes);
+ num_ways = 0;
}
return num_ways;
@@ -261,7 +263,9 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
for (i = 0; i < dc->current_state->stream_count; i++) {
/* MALL SS messaging is not supported with PSR at this time */
if (dc->current_state->streams[i] != NULL &&
- dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ (dc->current_state->stream_count > 1 || (!dc->current_state->streams[i]->dpms_off &&
+ dc->current_state->stream_status[i].plane_count > 0)))
return false;
}
@@ -475,39 +479,35 @@ bool dcn32_set_mcm_luts(
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
bool result = true;
- struct pwl_params *lut_params = NULL;
+ const struct pwl_params *lut_params = NULL;
// 1D LUT
- if (plane_state->blend_tf) {
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = &dpp_base->regamma_params;
- }
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ lut_params = &dpp_base->regamma_params;
}
result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
lut_params = NULL;
// Shaper
- if (plane_state->in_shaper_func) {
- if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
- lut_params = &plane_state->in_shaper_func->pwl;
- else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- ASSERT(false);
- cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = &dpp_base->shaper_params;
- }
+ if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->in_shaper_func.pwl;
+ else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ // TODO: dpp_base replace
+ ASSERT(false);
+ cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ lut_params = &dpp_base->shaper_params;
}
result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
// 3D
- if (plane_state->lut3d_func && plane_state->lut3d_func->state.bits.initialized == 1)
- result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func->lut_3d, mpcc_id);
+ if (plane_state->lut3d_func.state.bits.initialized == 1)
+ result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
else
result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
@@ -524,27 +524,24 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
enum dc_transfer_func_predefined tf;
bool result = true;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
if (mpc == NULL || plane_state == NULL)
return false;
tf = TRANSFER_FUNCTION_UNITY;
- if (plane_state->in_transfer_func &&
- plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
- tf = plane_state->in_transfer_func->tf;
+ if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED)
+ tf = plane_state->in_transfer_func.tf;
dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
- if (plane_state->in_transfer_func) {
- if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
- params = &plane_state->in_transfer_func->pwl;
- else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
- cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
- &dpp_base->degamma_params, false))
- params = &dpp_base->degamma_params;
- }
+ if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
+ params = &plane_state->in_transfer_func.pwl;
+ else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
+ &dpp_base->degamma_params, false))
+ params = &dpp_base->degamma_params;
dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
@@ -562,24 +559,24 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- struct pwl_params *params = NULL;
+ const struct pwl_params *params = NULL;
bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/
if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
/*program shaper and 3dlut in MPC*/
ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
- if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
- if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
- params = &stream->out_transfer_func->pwl;
- else if (pipe_ctx->stream->out_transfer_func->type ==
+ if (ret == false && mpc->funcs->set_output_gamma) {
+ if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func.pwl;
+ else if (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm3_helper_translate_curve_to_hw_format(
- stream->out_transfer_func,
+ &stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
/* there are no ROM LUTs in OUTGAM */
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
}
@@ -956,39 +953,16 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
if (dc->ctx->dmub_srv->dmub->fw_version <
- DMUB_FW_VERSION(7, 0, 35)) {
+ DMUB_FW_VERSION(7, 0, 35)) {
dc->debug.force_disable_subvp = true;
dc->debug.disable_fpo_optimizations = true;
}
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -1015,7 +989,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -1103,10 +1077,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -1119,20 +1089,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -1586,7 +1542,7 @@ void dcn32_init_blank(
struct output_pixel_processor *opp = NULL;
struct output_pixel_processor *bottom_opp = NULL;
uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
+ uint32_t otg_active_width = 0, otg_active_height = 0;
uint32_t i;
/* program opp dpg blank color */
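
In the dcn32 hunk above, dcn32_calculate_cab_allocation() now asks the resource pool for its bytes-to-ways conversion through an optional hook and returns zero ways when the hook is absent, instead of hard-coding the dcn32 helper. The dispatch idiom, minimally:

struct res_funcs {
    /* optional per-ASIC hook; may be NULL */
    unsigned int (*mall_ways_from_bytes)(unsigned int bytes);
};

unsigned int cab_ways(const struct res_funcs *f,
                      unsigned int bytes, int forced)
{
    if (forced > 0)
        return (unsigned int)forced;       /* debug override wins */
    if (f->mall_ways_from_bytes)
        return f->mall_ways_from_bytes(bytes);
    return 0;                              /* no hook: request nothing */
}
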
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 4b92df23ff0d..d4989d15e2f1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -349,7 +349,7 @@ void dcn35_init_hw(struct dc *dc)
if (dc->ctx->dmub_srv) {
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
- dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
}
if (dc->res_pool->pg_cntl) {
@@ -358,29 +358,6 @@ void dcn35_init_hw(struct dc *dc)
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -396,7 +373,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -474,10 +451,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -490,20 +463,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -536,6 +495,17 @@ void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
}
}
+void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on)
+{
+ if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ return;
+
+ if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) {
+ hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating(
+ hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on);
+ }
+}
+
void dcn35_dsc_pg_control(
struct dce_hwseq *hws,
unsigned int dsc_inst,
@@ -720,6 +690,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
struct hubbub *hubbub = dc->res_pool->hubbub;
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
bool can_apply_seamless_boot = false;
+ bool tg_enabled[MAX_PIPES] = {false};
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization) {
@@ -801,6 +772,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
// requesting data while in PSR.
tg->funcs->tg_init(tg);
hubp->power_gated = true;
+ tg_enabled[i] = true;
continue;
}
@@ -842,6 +814,20 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
tg->funcs->tg_init(tg);
}
+ /* Clean up MPC tree */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (tg_enabled[i]) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
+ if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
+ int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
+
+ if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
+ dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ }
+ }
+ }
+ }
+
if (pg_cntl != NULL) {
if (pg_cntl->funcs->dsc_pg_control != NULL) {
uint32_t num_opps = 0;
@@ -1002,6 +988,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
+ if (hpo_frl_stream_enc_acquired)
+ update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
+
update_state->pg_res_update[PG_DWB] = true;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1019,8 +1008,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.dpp)
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
- if ((pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) &&
- pipe_ctx->plane_res.mpcc_inst >= 0)
+ if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
if (pipe_ctx->stream_res.dsc)
@@ -1028,6 +1016,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
+
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc)
+ update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false;
}
/*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
@@ -1085,6 +1076,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
if (j == PG_OPTC && new_pipe->stream_res.tg)
update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
+
+ if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc)
+ update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
}
} else if (cur_pipe->plane_state == new_pipe->plane_state ||
cur_pipe == new_pipe) {
@@ -1114,6 +1108,11 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
cur_pipe->stream_res.tg != new_pipe->stream_res.tg &&
new_pipe->stream_res.tg)
update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
+
+ if (j == PG_DPSTREAM &&
+ cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc &&
+ new_pipe->stream_res.hpo_dp_stream_enc)
+ update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
}
}
}
@@ -1129,6 +1128,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
+ if (hpo_frl_stream_enc_acquired)
+ update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
+
}
/**
@@ -1253,14 +1255,19 @@ void dcn35_root_clock_control(struct dc *dc,
if (!pg_cntl)
return;
/*enable root clock first when power up*/
- if (power_on)
+ if (power_on) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
if (dc->hwseq->funcs.dpp_root_clock_control)
dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
}
+ if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
+ if (dc->hwseq->funcs.dpstream_root_clock_control)
+ dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
}
+
+ }
for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
if (update_state->pg_pipe_res_update[PG_DSC][i]) {
if (power_on) {
@@ -1273,14 +1280,19 @@ void dcn35_root_clock_control(struct dc *dc,
}
}
/*disable root clock first when power down*/
- if (!power_on)
+ if (!power_on) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
if (dc->hwseq->funcs.dpp_root_clock_control)
dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
}
+ if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
+ if (dc->hwseq->funcs.dpstream_root_clock_control)
+ dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
}
+
+ }
}
void dcn35_prepare_bandwidth(
@@ -1321,22 +1333,6 @@ void dcn35_optimize_bandwidth(
}
}
-void dcn35_set_idle_state(const struct dc *dc, bool allow_idle)
-{
- // TODO: Find a more suitable communcation
- if (dc->clk_mgr->funcs->set_idle_state)
- dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle);
-}
-
-uint32_t dcn35_get_idle_state(const struct dc *dc)
-{
- // TODO: Find a more suitable communcation
- if (dc->clk_mgr->funcs->get_idle_state)
- return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr);
-
- return 0;
-}
-
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, struct dc_crtc_timing_adjust adjust)
{
@@ -1394,3 +1390,31 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
set_static_screen_control(pipe_ctx[i]->stream_res.tg,
triggers, params->num_frames);
}
+
+void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
+ int num_pipes, uint32_t v_total_min, uint32_t v_total_max)
+{
+ int i = 0;
+ struct long_vtotal_params params = {0};
+
+ params.vertical_total_max = v_total_max;
+ params.vertical_total_min = v_total_min;
+
+ for (i = 0; i < num_pipes; i++) {
+ if (!pipe_ctx[i])
+ continue;
+
+ if (pipe_ctx[i]->stream) {
+ struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
+
+ if (timing)
+ params.vertical_blank_start = timing->v_total - timing->v_front_porch;
+ else
+ params.vertical_blank_start = 0;
+
+ if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs &&
+ pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal)
+ pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, &params);
+ }
+ }
+}
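
dcn35_set_long_vblank() above derives the blank start from the stream timing: the addressable-plus-back-porch region ends v_front_porch lines before v_total. A worked check of that relation (the numbers are an illustrative 1080p timing, not from the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* illustrative 1080p60 vertical timing */
    uint32_t v_total = 1125, v_front_porch = 4;

    uint32_t vblank_start = v_total - v_front_porch; /* as in the hunk */

    assert(vblank_start == 1121);
    return 0;
}
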
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index c354efa6c1b2..a731c8880d60 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -37,6 +37,8 @@ void dcn35_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool pow
void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
+void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on);
+
void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable);
@@ -84,13 +86,13 @@ void dcn35_dsc_pg_control(
unsigned int dsc_inst,
bool power_on);
-void dcn35_set_idle_state(const struct dc *dc, bool allow_idle);
-uint32_t dcn35_get_idle_state(const struct dc *dc);
-
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, struct dc_crtc_timing_adjust adjust);
void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
int num_pipes, const struct dc_static_screen_params *params);
+void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
+ int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index a93073055e7b..df3bf77f3fb4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -121,8 +121,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.hw_block_power_up = dcn35_hw_block_power_up,
.hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
- .set_idle_state = dcn35_set_idle_state,
- .get_idle_state = dcn35_get_idle_state
+ .set_long_vtotal = dcn35_set_long_vblank,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
@@ -148,6 +147,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
//.hubp_pg_control = dcn35_hubp_pg_control,
.enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
+ .dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn35_update_odm,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
index b24ad27fe6ef..a4b3c1e99ec6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
@@ -1,16 +1,27 @@
#
-# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved
+# Copyright (c) 2022-2024 Advanced Micro Devices, Inc.
#
-# All rights reserved. This notice is intended as a precaution against
-# inadvertent publication and does not imply publication or any waiver
-# of confidentiality. The year included in the foregoing notice is the
-# year of creation of the work.
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
#
-# Authors: AMD
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
#
# Makefile for DCN351.
-DCN351 = dcn351_init.o
+DCN351 = dcn351_hwseq.o dcn351_init.o
AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351))
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c
new file mode 100644
index 000000000000..93fe5b262a3d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "resource.h"
+#include "dcn351_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
+
+#define DC_LOGGER_INIT(logger) \
+ struct dal_logger *dc_logger = logger
+
+#define DC_LOGGER \
+ dc_logger
+
+void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state)
+{
+ int i, j;
+
+ dcn35_calc_blocks_to_gate(dc, context, update_state);
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (!update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ !update_state->pg_pipe_res_update[PG_DPP][i]) {
+ for (j = i - 1; j >= 0; j--) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+ update_state->pg_pipe_res_update[PG_DPP][j] = false;
+ }
+
+ break;
+ }
+ }
+}
+
+void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state)
+{
+ int i, j;
+
+ dcn35_calc_blocks_to_ungate(dc, context, update_state);
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ for (j = i - 1; j >= 0; j--) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+ update_state->pg_pipe_res_update[PG_DPP][j] = true;
+ }
+
+ break;
+ }
+ }
+}
+
+/**
+ * dcn351_hw_block_power_down() - power down sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power down:
+ *
+ * ONO Region 11, DCPG 19: dsc3
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 3, DCPG 25: hpo - SKIPPED. Should be kept on
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn351_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state)
+{
+ int i = 0;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+
+ if (!pg_cntl || dc->debug.ignore_pg)
+ return;
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
+ }
+
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
+ }
+ }
+
+ // domain25 currently always on.
+
+	/* this needs all clients to unregister their optc interrupts; let DMUB FW handle it */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
+
+ // domain23 currently always on.
+ // domain22 currently always on.
+}
+
+/**
+ * dcn351_hw_block_power_up() - power up sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power up:
+ *
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 3, DCPG 25: hpo - SKIPPED
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 11, DCPG 19: dsc3
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn351_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state)
+{
+ int i = 0;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+
+ if (!pg_cntl || dc->debug.ignore_pg)
+ return;
+
+ // domain22 currently always on.
+ // domain23 currently always on.
+
+	/* this needs all clients to unregister their optc interrupts; let DMUB FW handle it */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
+
+ // domain25 currently always on.
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
+ }
+
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
+ }
+ }
+}
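
The gate/ungate helpers above encode one rule: ONO regions power off strictly from the highest-numbered pipe downward, so as soon as one pipe must stay powered, every lower-indexed pipe must stay powered too. A standalone sketch of that cascade on a plain boolean map (example_cascade_gating is hypothetical, not from this patch):

    /* Hedged sketch: scanning from the top pipe, clear all gating requests
     * below the first pipe that must remain powered, mirroring
     * dcn351_calc_blocks_to_gate(). 'gate[i]' means pipe i may be gated.
     */
    static void example_cascade_gating(bool *gate, int pipe_count)
    {
            int i;

            for (i = pipe_count - 1; i >= 0; i--) {
                    if (!gate[i]) {
                            /* Pipes 0..i-1 sit in lower ONO regions and must
                             * stay powered while pipe i is powered.
                             */
                            while (--i >= 0)
                                    gate[i] = false;
                            break;
                    }
            }
    }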
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h
index 3341ef71009b..6d8f3bfb668e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2023 Advanced Micro Devices, Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,30 +24,18 @@
*
*/
-#include "core_types.h"
-#include "dcn35_dpp.h"
-#include "reg_helper.h"
+#ifndef __DC_HWSS_DCN351_H__
+#define __DC_HWSS_DCN351_H__
-#define REG(reg) dpp->tf_regs->reg
+#include "hw_sequencer_private.h"
-#define CTX dpp->base.ctx
+void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state);
+void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
+ struct pg_block_update *update_state);
+void dcn351_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state);
+void dcn351_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state);
-#undef FN
-#define FN(reg_name, field_name) \
- ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \
- ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name
-
-bool dpp35_construct(struct dcn3_dpp *dpp, struct dc_context *ctx,
- uint32_t inst, const struct dcn3_dpp_registers *tf_regs,
- const struct dcn35_dpp_shift *tf_shift,
- const struct dcn35_dpp_mask *tf_mask)
-{
- return dpp32_construct(dpp, ctx, inst, tf_regs,
- (const struct dcn3_dpp_shift *)(tf_shift),
- (const struct dcn3_dpp_mask *)(tf_mask));
-}
-
-void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable)
-{
- REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable);
-}
+#endif /* __DC_HWSS_DCN351_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index ab17fa1c64e8..a53092cd619b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -32,6 +32,7 @@
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
#include "dcn35/dcn35_hwseq.h"
+#include "dcn351/dcn351_hwseq.h"
#include "dcn351_init.h"
@@ -67,7 +68,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.prepare_bandwidth = dcn35_prepare_bandwidth,
.optimize_bandwidth = dcn35_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn10_set_drr,
+ .set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn35_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
@@ -120,8 +121,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.hw_block_power_up = dcn35_hw_block_power_up,
.hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
- .set_idle_state = dcn35_set_idle_state,
- .get_idle_state = dcn35_get_idle_state
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
@@ -147,6 +146,7 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
//.hubp_pg_control = dcn35_hubp_pg_control,
.enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
+ .dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn35_update_odm,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
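
Both init tables reach these implementations only through function pointers, so every call site guards each hook before dispatching. A sketch of that pattern, assuming an existing dce_hwseq instance (example_root_clock_toggle is hypothetical):

    /* Hedged sketch of the dispatch pattern used throughout hwseq:
     * invoke a per-ASIC hook only if the init table populated it.
     */
    static void example_root_clock_toggle(struct dce_hwseq *hws,
                                          unsigned int inst, bool on)
    {
            if (hws->funcs.dpp_root_clock_control)
                    hws->funcs.dpp_root_clock_control(hws, inst, on);
            if (hws->funcs.dpstream_root_clock_control)
                    hws->funcs.dpstream_root_clock_control(hws, inst, on);
    }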
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index f89f205e42a1..7c339e7e7117 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -377,7 +377,10 @@ struct hw_sequencer_funcs {
/* Idle Optimization Related */
bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
- bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane,
+ bool (*does_plane_fit_in_mall)(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
@@ -424,11 +427,10 @@ struct hw_sequencer_funcs {
struct pg_block_update *update_state);
void (*root_clock_control)(struct dc *dc,
struct pg_block_update *update_state, bool power_on);
- void (*set_idle_state)(const struct dc *dc, bool allow_idle);
- uint32_t (*get_idle_state)(const struct dc *dc);
bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx);
+ void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
};
void color_space_to_black_color(
@@ -478,9 +480,10 @@ void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
- int *num_steps,
+ unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
- struct dc_stream_status *stream_status);
+ struct dc_stream_status *stream_status,
+ struct dc_state *context);
void hwss_send_dmcub_cmd(union block_sequence_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 554cfab5ab24..341219cf4144 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -120,6 +120,10 @@ struct hwseq_private_funcs {
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool clock_on);
+ void (*dpstream_root_clock_control)(
+ struct dce_hwseq *hws,
+		unsigned int dp_hpo_inst,
+ bool clock_on);
void (*dpp_pg_control)(struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b1b72e688f74..028b2f971e36 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -90,6 +90,9 @@ struct resource_funcs {
void (*update_soc_for_wm_a)(
struct dc *dc, struct dc_state *context);
+ unsigned int (*calculate_mall_ways_from_bytes)(
+ const struct dc *dc,
+ unsigned int total_size_in_mall_bytes);
/**
* @populate_dml_pipes - Populate pipe data struct
*
@@ -336,7 +339,9 @@ struct stream_resource {
};
struct plane_resource {
+ /* scl_data is scratch space required to program a plane */
struct scaler_data scl_data;
+	/* The hw object pointers below are required to enable the plane */
struct hubp *hubp;
struct mem_input *mi;
struct input_pixel_processor *ipp;
@@ -496,7 +501,7 @@ struct dcn_bw_writeback {
struct dcn_bw_output {
struct dc_clocks clk;
- struct dcn_watermark_set watermarks;
+ union dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
unsigned int mall_ss_size_bytes;
@@ -515,6 +520,7 @@ struct bw_context {
union bw_output bw;
struct display_mode_lib dml;
struct dml2_context *dml2;
+ struct dml2_context *dml2_dc_power_source;
};
struct dc_dmub_cmd {
@@ -522,25 +528,6 @@ struct dc_dmub_cmd {
enum dm_dmub_wait_type wait_type;
};
-struct dc_scratch_space {
- /* used to temporarily backup plane states of a stream during
- * dc update. The reason is that plane states are overwritten
- * with surface updates in dc update. Once they are overwritten
- * current state is no longer valid. We want to temporarily
- * store current value in plane states so we can still recover
- * a valid current state during dc update.
- */
- struct dc_plane_state plane_states[MAX_SURFACE_NUM];
- struct dc_gamma gamma_correction[MAX_SURFACE_NUM];
- struct dc_transfer_func in_transfer_func[MAX_SURFACE_NUM];
- struct dc_3dlut lut3d_func[MAX_SURFACE_NUM];
- struct dc_transfer_func in_shaper_func[MAX_SURFACE_NUM];
- struct dc_transfer_func blend_tf[MAX_SURFACE_NUM];
-
- struct dc_stream_state stream_state;
- struct dc_transfer_func out_transfer_func;
-};
-
/**
* struct dc_state - The full description of a state requested by users
*/
@@ -623,8 +610,7 @@ struct dc_state {
unsigned int stutter_period_us;
} perf_params;
-
- struct dc_scratch_space scratch;
+ enum dc_power_source_type power_source;
};
struct replay_context {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 9e4ddc985240..55529c5f471c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -31,7 +31,7 @@
#define __DCN_CALCS_H__
#include "bw_fixed.h"
-#include "../dml/display_mode_lib.h"
+#include "dml/display_mode_lib.h"
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 17e014d3bdc8..4f7480f60c85 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -281,8 +281,6 @@ struct clk_mgr_funcs {
void (*set_low_power_state)(struct clk_mgr *clk_mgr);
void (*exit_low_power_state)(struct clk_mgr *clk_mgr);
bool (*is_ips_supported)(struct clk_mgr *clk_mgr);
- void (*set_idle_state)(struct clk_mgr *clk_mgr, bool allow_idle);
- uint32_t (*get_idle_state)(struct clk_mgr *clk_mgr);
void (*init_clocks)(struct clk_mgr *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index f4d4a68c91dc..4ba18ea57aad 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -349,7 +349,7 @@ struct clk_mgr_internal {
enum dm_pp_clocks_state cur_min_clks_state;
bool periodic_retraining_disabled;
- unsigned int cur_phyclk_req_table[MAX_PIPES * 2];
+ unsigned int cur_phyclk_req_table[MAX_LINKS];
bool smu_present;
void *wm_range_table;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index b9a06bf84cc9..d4c7885fc916 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -59,6 +59,7 @@ enum dentist_dispclk_change_mode {
struct dp_dto_params {
int otg_inst;
enum signal_type signal;
+ enum streamclk_source clk_src;
uint64_t pixclk_hz;
uint64_t refclk_hz;
};
@@ -105,6 +106,10 @@ struct dccg_funcs {
void (*otg_drop_pixel)(struct dccg *dccg,
uint32_t otg_inst);
void (*dccg_init)(struct dccg *dccg);
+ void (*set_dpstreamclk_root_clock_gating)(
+ struct dccg *dccg,
+ int dp_hpo_inst,
+ bool enable);
void (*set_dpstreamclk)(
struct dccg *dccg,
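
The new DCCG callback is the bottom of the chain started in dcn35_root_clock_control(): hwseq decides which HPO stream encoder instances lose their root clock, and DCCG flips the gate. A hedged sketch of the forwarding shape (register programming omitted; this is not the patch's body, and the funcs check keeps older ASICs without the callback safe):

    /* Hedged sketch: an hwseq-level wrapper that forwards DP stream root
     * clock gating to DCCG, in the style of
     * dcn35_dpstream_root_clock_control().
     */
    static void example_dpstream_root_clock(struct dce_hwseq *hws,
                                            int dp_hpo_inst, bool clock_on)
    {
            struct dccg *dccg = hws->ctx->dc->res_pool->dccg;

            if (dccg && dccg->funcs->set_dpstreamclk_root_clock_gating)
                    dccg->funcs->set_dpstreamclk_root_clock_gating(dccg,
                                    dp_hpo_inst, clock_on);
    }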
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 2ae7484d18af..305fdc127bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -160,7 +160,7 @@ struct hubbub_funcs {
bool (*program_watermarks)(
struct hubbub *hubbub,
- struct dcn_watermark_set *watermarks,
+ union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 0f24afbf4388..ca8de345d039 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -322,7 +322,7 @@ struct dpp_funcs {
const struct pwl_params *params);
bool (*dpp_program_3dlut)(
struct dpp *dpp,
- struct tetrahedral_params *params);
+ const struct tetrahedral_params *params);
void (*dpp_cnv_set_alpha_keyer)(
struct dpp *dpp_base,
struct cnv_color_keyer_params *color_keyer);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 729ca0064e94..063efc8128a7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -147,9 +147,10 @@ struct dwb_caps {
unsigned int support_ogam :1;
unsigned int support_wbscl :1;
unsigned int support_ocsc :1;
- unsigned int support_stereo :1;
+ unsigned int support_stereo :1;
+ unsigned int support_4k_120p :1;
} caps;
- unsigned int reserved2[9]; /* Reserved for future use, MUST BE 0. */
+ unsigned int reserved2[10]; /* Reserved for future use, MUST BE 0. */
};
struct dwbc {
@@ -166,8 +167,9 @@ struct dwbc {
bool dwb_is_drc;
int wb_src_plane_inst;/*hubp, mpcc, inst*/
uint32_t mask_id;
- int otg_inst;
- bool mvc_cfg;
+ int otg_inst;
+ bool mvc_cfg;
+ struct dc_dwb_params params;
};
struct dwbc_funcs {
@@ -192,6 +194,10 @@ struct dwbc_funcs {
struct dwbc *dwbc,
enum dwb_frame_capture_enable enable);
+ void (*dwb_set_scaler)(
+ struct dwbc *dwbc,
+ struct dc_dwb_params *params);
+
void (*set_stereo)(
struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
@@ -205,9 +211,11 @@ struct dwbc_funcs {
struct dwbc *dwbc,
struct dwb_warmup_params *warmup_params);
-
+ bool (*dwb_get_mcifbuf_line)(
+ struct dwbc *dwbc, unsigned int *buf_idx,
+ unsigned int *cur_line,
+ unsigned int *over_run);
#if defined(CONFIG_DRM_AMD_DC_FP)
-
void (*dwb_program_output_csc)(
struct dwbc *dwbc,
enum dc_color_space color_space,
@@ -216,17 +224,17 @@ struct dwbc_funcs {
bool (*dwb_ogam_set_output_transfer_func)(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
-
+#endif
//TODO: merge with output_transfer_func?
bool (*dwb_ogam_set_input_transfer_func)(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
-#endif
+
+ void (*get_drr_time_stamp)(
+ struct dwbc *dwbc, uint32_t *time_stamp);
+
bool (*get_dwb_status)(
struct dwbc *dwbc);
- void (*dwb_set_scaler)(
- struct dwbc *dwbc,
- struct dc_dwb_params *params);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index dcae23faeee3..c80ebb407add 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,10 +44,11 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
+#define MAX_LINKS (MAX_PIPES * 2)
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
-#define MAX_HPO_DP2_LINK_ENCODERS 2
+#define MAX_HPO_DP2_LINK_ENCODERS 4
struct gamma_curve {
uint32_t offset;
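
MAX_LINKS now names what several call sites previously spelled as MAX_PIPES * 2 (see the DPIA bandwidth changes later in this series). An illustrative loop, assuming the existing dc->links array:

    /* Hedged sketch: iterate every possible link with the named bound
     * instead of the magic MAX_PIPES * 2.
     */
    int i;

    for (i = 0; i < MAX_LINKS; ++i) {
            if (!dc->links[i])
                    continue;
            /* ... per-link work ... */
    }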
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index dbe7afa9d3a2..af9183f5d69b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -163,12 +163,11 @@ struct link_encoder_funcs {
enum signal_type (*get_dig_mode)(
struct link_encoder *enc);
+
void (*set_dio_phy_mux)(
struct link_encoder *enc,
enum encoder_type_select sel,
uint32_t hpo_inst);
- void (*set_dig_output_mode)(
- struct link_encoder *enc, uint8_t pix_per_container);
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index b72fb314d804..86c12cd6f47d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -50,11 +50,13 @@ struct dcn_watermarks {
uint32_t usr_retraining_ns;
};
-struct dcn_watermark_set {
- struct dcn_watermarks a;
- struct dcn_watermarks b;
- struct dcn_watermarks c;
- struct dcn_watermarks d;
+union dcn_watermark_set {
+ struct {
+ struct dcn_watermarks a;
+ struct dcn_watermarks b;
+ struct dcn_watermarks c;
+ struct dcn_watermarks d;
+ }; // legacy
};
struct dce_watermarks {
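
Turning dcn_watermark_set into a union with an anonymous struct keeps every existing .a/.b/.c/.d access compiling unchanged while letting a future alternate layout share the same storage. A sketch, assuming only the legacy view shown above and that dcn_watermarks carries an urgent_ns field:

    /* Hedged sketch: anonymous-struct members remain reachable exactly
     * as before the union conversion.
     */
    union dcn_watermark_set wm = {0};

    wm.a.urgent_ns = 4000;
    wm.b.urgent_ns = 4000;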
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 9a8bf6ec70ea..8d32e525f05a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -93,6 +93,8 @@ struct dcn_otg_state {
uint32_t vertical_interrupt1_line;
uint32_t vertical_interrupt2_en;
uint32_t vertical_interrupt2_line;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_double_buffer_control;
};
void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index a15efadb9183..75b9ec21f297 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -178,10 +178,6 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
- void (*reset_fifo)(
- struct stream_encoder *enc
- );
-
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index ffad8fe16c54..cd68ecc242c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -64,6 +64,12 @@ struct drr_params {
bool immediate_flip;
};
+struct long_vtotal_params {
+ uint32_t vertical_total_min;
+ uint32_t vertical_total_max;
+ uint32_t vertical_blank_start;
+};
+
#define LEFT_EYE_3D_PRIMARY_SURFACE 1
#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
@@ -331,6 +337,7 @@ struct timing_generator_funcs {
void (*init_odm)(struct timing_generator *tg);
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
};
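
For a fixed long refresh (vertical_total_min == vertical_total_max), the OPTC cannot count past its hardware vtotal limit, so the counter is halted at vertical_blank_start and a stop timer supplies the remaining lines; optc35_set_long_vtotal() later in this series does exactly this. A hedged sketch of the split (example_split_long_vtotal is hypothetical):

    /* Hedged sketch: split an oversized fixed vtotal into a counter halt
     * point plus the lines beyond the counter's limit. E.g. with a
     * hardware limit of 32767 and a requested vtotal of 40000, the timer
     * covers the remaining 40000 - 32767 = 7233 lines.
     */
    static void example_split_long_vtotal(uint32_t vtotal, uint32_t vblank_start,
                                          uint32_t max_otg_v_total,
                                          uint32_t *stop_line, uint32_t *stop_timer)
    {
            *stop_line = vblank_start;              /* where the counter halts */
            *stop_timer = vtotal - max_otg_v_total; /* lines beyond the limit  */
    }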
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h
new file mode 100644
index 000000000000..51da368f5c3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#ifndef __DC_VPG_H__
+#define __DC_VPG_H__
+
+struct dc_context;
+struct dc_info_packet;
+
+struct vpg;
+
+struct vpg_funcs {
+ void (*update_generic_info_packet)(
+ struct vpg *vpg,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet,
+ bool immediate_update);
+
+ void (*vpg_poweron)(
+ struct vpg *vpg);
+
+ void (*vpg_powerdown)(
+ struct vpg *vpg);
+};
+
+struct vpg {
+ const struct vpg_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+#endif /* __DC_VPG_H__ */
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index bf29fc58ea6a..7ab8ba5e23ed 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -288,7 +288,7 @@ struct link_service {
struct dc_link *link, uint32_t coasting_vtotal);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
- const bool is_alpm);
+ const enum pr_residency_mode mode);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
const unsigned int *power_opts, uint32_t coasting_vtotal);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 77a60aa9f27b..361ad6b16b96 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -510,6 +510,17 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
/*
* Look for a free pipe in new resource context that is used as a secondary DPP
+ * pipe in current resource context.
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int resource_find_free_pipe_used_as_cur_sec_dpp(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
+ * Look for a free pipe in new resource context that is used as a secondary DPP
* pipe in any MPCC combine in current resource context.
* return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
* pipe idx of the free pipe
@@ -573,13 +584,6 @@ bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
-#if defined(CONFIG_DRM_AMD_DC_FP)
-struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
- const struct resource_context *res_ctx,
- const struct resource_pool *pool,
- const struct dc_link *link);
-#endif
-
void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
struct dc_state *context);
@@ -615,4 +619,10 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
+
+/* Setup dc callbacks for dml2
+ * @dc: the display core structure
+ * @dml2_options: struct to hold callbacks
+ */
+void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 1c0d89e675da..bb576a9c5fdb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service,
info->ext_id);
uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
- struct timing_generator *tg =
- dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+ struct timing_generator *tg;
+
+ if (pipe_offset >= MAX_PIPES)
+ return false;
+
+ tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
if (enable) {
if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 22b24749c9d2..8d1a1cc94a8b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -884,7 +884,7 @@ void dp_set_preferred_link_settings(struct dc *dc,
{
int i;
struct pipe_ctx *pipe;
- struct dc_stream_state *link_stream;
+ struct dc_stream_state *link_stream = 0;
struct dc_link_settings store_settings = *link_setting;
link->preferred_link_setting = store_settings;
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
index fbcd8fb58ea8..c8c55f196f8d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
@@ -24,7 +24,6 @@
*/
#include "link_dp_trace.h"
#include "link/protocols/link_dpcd.h"
-#include "link.h"
void dp_trace_init(struct dc_link *link)
{
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index b8c4a04dd175..0d523dc43d02 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -516,8 +516,8 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = {0};
- uint8_t link_bw_set;
- uint8_t link_rate_set;
+ uint8_t link_bw_set = 0;
+ uint8_t link_rate_set = 0;
uint32_t read_dpcd_retry_cnt = 10;
enum dc_status status = DC_ERROR_UNEXPECTED;
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index a72de44a5747..b53ad18dbfbc 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -55,6 +55,8 @@
#include "dccg.h"
#include "clk_mgr.h"
#include "atomfirmware.h"
+#include "vpg.h"
+
#define DC_LOGGER \
dc_logger
#define DC_LOGGER_INIT(logger) \
@@ -67,7 +69,6 @@
#define RETIMER_REDRIVER_INFO(...) \
DC_LOG_RETIMER_REDRIVER( \
__VA_ARGS__)
-#include "dc/dcn30/dcn30_vpg.h"
#define MAX_MTP_SLOT_COUNT 64
#define LINK_TRAINING_ATTEMPTS 4
@@ -127,7 +128,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
link->link_enc->funcs->get_dig_frontend &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
- unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+ int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (fe != ENGINE_ID_UNKNOWN)
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
@@ -725,7 +726,7 @@ static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
static void enable_mst_on_sink(struct dc_link *link, bool enable)
{
- unsigned char mstmCntl;
+ unsigned char mstmCntl = 0;
core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
if (enable)
@@ -803,7 +804,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
if (enable) {
struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
+ struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
@@ -1575,7 +1576,7 @@ static bool write_128b_132b_sst_payload_allocation_table(
break;
}
} else {
- union dpcd_rev dpcdRev;
+ union dpcd_rev dpcdRev = {0};
if (core_link_read_dpcd(
link,
@@ -2119,7 +2120,7 @@ static enum dc_status enable_link_dp_mst(
struct pipe_ctx *pipe_ctx)
{
struct dc_link *link = pipe_ctx->stream->link;
- unsigned char mstm_cntl;
+ unsigned char mstm_cntl = 0;
/* sink signal type after MST branch is MST. Multiple MST sinks
* share one link. Link DP PHY is enable or training only once.
@@ -2285,6 +2286,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+ enum dp_panel_mode panel_mode_dp = dp_get_panel_mode(link);
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -2311,6 +2313,8 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
dc->hwss.disable_audio_stream(pipe_ctx);
+ edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false);
+
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 289f5d133342..a01d0842bf8e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -992,7 +992,7 @@ enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link
static void read_dp_device_vendor_id(struct dc_link *link)
{
- struct dp_device_vendor_id dp_id;
+ struct dp_device_vendor_id dp_id = {0};
/* read IEEE branch device id */
core_link_read_dpcd(
@@ -1087,7 +1087,7 @@ static void get_active_converter_info(
}
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
- uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
+ uint8_t det_caps[16] = {0}; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
union dwnstream_port_caps_byte0 *port_caps =
(union dwnstream_port_caps_byte0 *)det_caps;
if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
@@ -1172,7 +1172,7 @@ static void get_active_converter_info(
set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
{
- struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision = {0};
core_link_read_dpcd(
link,
@@ -1242,7 +1242,7 @@ static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
{
- uint8_t dpcd_data[16];
+ uint8_t dpcd_data[16] = {0};
uint32_t read_dpcd_retry_cnt = 3;
enum dc_status status = DC_ERROR_UNEXPECTED;
union dp_downstream_port_present ds_port = { 0 };
@@ -1408,7 +1408,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
static void retrieve_cable_id(struct dc_link *link)
{
- union dp_cable_id usbc_cable_id;
+ union dp_cable_id usbc_cable_id = {0};
link->dpcd_caps.cable_id.raw = 0;
core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX,
@@ -1475,7 +1475,7 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8];
+ uint8_t lttpr_dpcd_data[8] = {0};
enum dc_status status;
bool is_lttpr_present;
@@ -1931,8 +1931,8 @@ void detect_edp_sink_caps(struct dc_link *link)
uint32_t entry;
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
- uint8_t backlight_adj_cap;
- uint8_t general_edp_cap;
+ uint8_t backlight_adj_cap = 0;
+ uint8_t general_edp_cap = 0;
retrieve_link_cap(link);
link->dpcd_caps.edp_supported_link_rates_count = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 5491b707cec8..68a8fd7f84d0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -166,7 +166,7 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
uint8_t idx = 0xFF;
int i;
- for (i = 0; i < MAX_PIPES * 2; ++i) {
+ for (i = 0; i < MAX_LINKS; ++i) {
if (!dc_struct->links[i] ||
dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
@@ -196,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
- for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
+ for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) {
if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index ba69874be5a4..0fcf0b8530ac 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -120,7 +120,7 @@ bool dp_parse_link_loss_status(
static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
- union dpcd_psr_configuration psr_configuration;
+ union dpcd_psr_configuration psr_configuration = {0};
if (!link->psr_settings.psr_feature_enabled)
return false;
@@ -186,9 +186,9 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
static void handle_hpd_irq_replay_sink(struct dc_link *link)
{
- union dpcd_replay_configuration replay_configuration;
+ union dpcd_replay_configuration replay_configuration = {0};
/*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
- union psr_error_status replay_error_status;
+ union psr_error_status replay_error_status = {0};
if (!link->replay_settings.replay_feature_enabled)
return;
@@ -280,7 +280,7 @@ void dp_handle_link_loss(struct dc_link *link)
static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data)
{
enum dc_status retval;
- union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
retval = core_link_read_dpcd(
link,
@@ -320,7 +320,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
/* Read 14 bytes in a single read and then copy only the required fields.
* This is more efficient than doing it in two separate AUX reads. */
- uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1] = {0};
retval = core_link_read_dpcd(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index e538c67d3ed9..1818970b8eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1071,7 +1071,7 @@ enum dc_status dpcd_set_link_settings(
* MUX chip gets link rate set back before link training.
*/
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- uint8_t supported_link_rates[16];
+ uint8_t supported_link_rates[16] = {0};
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
@@ -1587,21 +1587,7 @@ bool perform_link_training_with_retries(
msleep(delay_dp_power_up_in_ms);
}
- if (panel_mode == DP_PANEL_MODE_EDP) {
- struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-
- if (cp_psp && cp_psp->funcs.enable_assr) {
- /* ASSR is bound to fail with unsigned PSP
- * verstage used during devlopment phase.
- * Report and continue with eDP panel mode to
- * perform eDP link training with right settings
- */
- bool result;
- result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
- if (!result && link->panel_mode != DP_PANEL_MODE_EDP)
- panel_mode = DP_PANEL_MODE_DEFAULT;
- }
- }
+ edp_set_panel_assr(link, pipe_ctx, &panel_mode, true);
dp_set_panel_mode(link, panel_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 5d36bab0029c..edb21d21952a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -291,7 +291,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
uint32_t retry_count = 0;
uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */
@@ -617,7 +617,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
uint32_t retries_eq = 0;
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
enum dc_dp_training_pattern tr_pattern;
uint32_t wait_time_microsec = 0;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index c5de6ed5bf58..a72c898b64fa 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -130,7 +130,7 @@ static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint3
* XXX: Do not allow any two address ranges in this array to overlap
*/
static const struct dpcd_address_range mandatory_dpcd_blocks[] = {
- { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }};
+ { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_128B132B_RATES }};
/*
* extend addresses to read all mandatory blocks together
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 3baa2bdd6dd6..ad9aca790dd7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -38,6 +38,7 @@
#include "dc/dc_dmub_srv.h"
#include "dce/dmub_replay.h"
#include "abm.h"
+#include "resource.h"
#define DC_LOGGER \
link->ctx->logger
#define DC_LOGGER_INIT(logger)
@@ -320,8 +321,8 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing)
{
struct dc_link_settings link_setting;
- uint8_t link_bw_set;
- uint8_t link_rate_set;
+ uint8_t link_bw_set = 0;
+ uint8_t link_rate_set = 0;
uint32_t req_bw;
union lane_count_set lane_count_set = {0};
@@ -1055,7 +1056,7 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
}
bool edp_replay_residency(const struct dc_link *link,
- unsigned int *residency, const bool is_start, const bool is_alpm)
+ unsigned int *residency, const bool is_start, const enum pr_residency_mode mode)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1064,8 +1065,11 @@ bool edp_replay_residency(const struct dc_link *link,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
+ if (!residency)
+ return false;
+
if (replay != NULL && link->replay_settings.replay_feature_enabled)
- replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm);
+ replay->funcs->replay_residency(replay, panel_inst, residency, is_start, mode);
else
*residency = 0;
@@ -1145,3 +1149,66 @@ int edp_get_target_backlight_pwm(const struct dc_link *link)
return (int) abm->funcs->get_target_backlight(abm);
}
+
+static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
+ struct link_resource *link_res, bool enable)
+{
+ union dmub_rb_cmd cmd;
+ bool use_hpo_dp_link_enc = false;
+ uint8_t link_enc_index = 0;
+ uint8_t phy_type = 0;
+ uint8_t phy_id = 0;
+
+ if (!pDC->config.use_assr_psp_message)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ if (link_res->hpo_dp_link_enc) {
+ link_enc_index = link_res->hpo_dp_link_enc->inst;
+ use_hpo_dp_link_enc = true;
+ }
+
+ if (enable)
+ phy_type = ((dp_get_panel_mode(link) == DP_PANEL_MODE_EDP) ? 1 : 0);
+
+ phy_id = resource_transmitter_to_phy_idx(pDC, link->link_enc->transmitter);
+
+ cmd.assr_enable.header.type = DMUB_CMD__PSP;
+ cmd.assr_enable.header.sub_type = DMUB_CMD__PSP_ASSR_ENABLE;
+ cmd.assr_enable.assr_data.enable = enable;
+ cmd.assr_enable.assr_data.phy_port_type = phy_type;
+ cmd.assr_enable.assr_data.phy_port_id = phy_id;
+ cmd.assr_enable.assr_data.link_enc_index = link_enc_index;
+ cmd.assr_enable.assr_data.hpo_mode = use_hpo_dp_link_enc;
+
+ dc_wake_and_execute_dmub_cmd(pDC->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx,
+ enum dp_panel_mode *panel_mode, bool enable)
+{
+ struct link_resource *link_res = &pipe_ctx->link_res;
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+
+ if (*panel_mode != DP_PANEL_MODE_EDP)
+ return;
+
+ if (link->dc->config.use_assr_psp_message) {
+ edp_set_assr_enable(link->dc, link, link_res, enable);
+ } else if (cp_psp && cp_psp->funcs.enable_assr && enable) {
+		/* ASSR is bound to fail with an unsigned PSP
+		 * verstage used during the development phase.
+		 * Report and continue with eDP panel mode to
+		 * perform eDP link training with the right settings.
+ */
+ bool result;
+
+ result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
+
+ if (!result && link->panel_mode != DP_PANEL_MODE_EDP)
+ *panel_mode = DP_PANEL_MODE_DEFAULT;
+ }
+}
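
edp_set_panel_assr() is now called symmetrically: with enable=true from perform_link_training_with_retries() and enable=false from link_set_dpms_off(), so the PSP/DMUB side sees a balanced enable/disable per stream. A sketch of the calling contract (hypothetical bracketing, not a literal excerpt of either caller):

    /* Hedged sketch: bracket link training with ASSR enable/disable.
     * panel_mode may be downgraded to DP_PANEL_MODE_DEFAULT if the PSP
     * rejects ASSR, which is why it is passed by pointer.
     */
    enum dp_panel_mode panel_mode = dp_get_panel_mode(link);

    edp_set_panel_assr(link, pipe_ctx, &panel_mode, true);
    dp_set_panel_mode(link, panel_mode);
    /* ... link training and stream enable ... */
    edp_set_panel_assr(link, pipe_ctx, &panel_mode, false);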
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index a158c6234d42..cb6d95cc36e4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -61,7 +61,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
union dmub_replay_cmd_set *cmd_data);
bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
- unsigned int *residency, const bool is_start, const bool is_alpm);
+ unsigned int *residency, const bool is_start, const enum pr_residency_mode mode);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
const unsigned int *power_opts, uint32_t coasting_vtotal);
@@ -76,4 +76,6 @@ bool edp_receiver_ready_T9(struct dc_link *link);
bool edp_receiver_ready_T7(struct dc_link *link);
bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
void edp_set_panel_power(struct dc_link *link, bool powerOn);
+void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx,
+ enum dp_panel_mode *panel_mode, bool enable);
#endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
index e3d729ab5b9f..caa617883f62 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
@@ -35,7 +35,7 @@
bool link_get_hpd_state(struct dc_link *link)
{
- uint32_t state;
+ uint32_t state = 0;
dal_gpio_lock_pin(link->hpd_gpio);
dal_gpio_get_value(link->hpd_gpio, &state);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 0e8f4f36c87c..5574bc628053 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -945,10 +945,19 @@ void optc1_set_drr(
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
- }
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
+ // Setup manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
+
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
}
void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
@@ -1383,6 +1392,9 @@ void optc1_read_otg_state(struct optc *optc1,
REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION,
OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line);
+
+ s->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK);
+ s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
}
bool optc1_get_otg_active_size(struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 6c2e84d3967f..2f3bd7648ba7 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -129,6 +129,8 @@ struct dcn_optc_registers {
uint32_t OTG_V_TOTAL_MID;
uint32_t OTG_V_TOTAL_MIN;
uint32_t OTG_V_TOTAL_CONTROL;
+ uint32_t OTG_V_COUNT_STOP_CONTROL;
+ uint32_t OTG_V_COUNT_STOP_CONTROL2;
uint32_t OTG_TRIGA_CNTL;
uint32_t OTG_TRIGA_MANUAL_TRIG;
uint32_t OTG_MANUAL_FLOW_CONTROL;
@@ -515,12 +517,15 @@ struct dcn_optc_registers {
type MANUAL_FLOW_CONTROL;\
type MANUAL_FLOW_CONTROL_SEL;
+#define V_TOTAL_REGS(type)
+
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)\
type OTG_V_SYNC_MODE;\
type OTG_DRR_TRIGGER_WINDOW_START_X;\
type OTG_DRR_TRIGGER_WINDOW_END_X;\
type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\
+ V_TOTAL_REGS(type)\
type OTG_OUT_MUX;\
type OTG_M_CONST_DTO_PHASE;\
type OTG_M_CONST_DTO_MODULO;\
@@ -581,7 +586,9 @@ struct dcn_optc_registers {
type OTG_CRC1_WINDOWB_X_END_READBACK;\
type OTG_CRC1_WINDOWB_Y_START_READBACK;\
type OTG_CRC1_WINDOWB_Y_END_READBACK;\
- type OPTC_FGCG_REP_DIS;
+ type OPTC_FGCG_REP_DIS;\
+ type OTG_V_COUNT_STOP;\
+ type OTG_V_COUNT_STOP_TIMER;
struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index 58bdbd859bf9..d6f095b4555d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
@@ -462,16 +462,6 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* Set the min/max selectors unconditionally so that
- * DMCUB fw may change OTG timings when necessary
- * TODO: Remove the w/a after fixing the issue in DMCUB firmware
- */
- REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 1,
- OTG_V_TOTAL_MAX_SEL, 1,
- OTG_FORCE_LOCK_ON_EVENT, 0,
- OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
REG_SET_8(OTG_TRIGA_CNTL, 0,
OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index f07a4c7e48bc..52eab8fccb7f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 5b1547508850..d393be30dff8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -32,6 +32,7 @@
#include "reg_helper.h"
#include "dc.h"
#include "dcn_calc_math.h"
+#include "dc_dmub_srv.h"
#define REG(reg)\
optc1->tg_regs->reg
@@ -213,6 +214,167 @@ static bool optc35_configure_crc(struct timing_generator *optc,
return true;
}
+static void optc35_setup_manual_trigger(struct timing_generator *optc)
+{
+ if (!optc || !optc->ctx)
+ return;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ struct dc *dc = optc->ctx->dc;
+
+ if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
+ dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst);
+ else {
+ /*
+ * MIN_MASK_EN is gone and MASK is now always enabled.
+ *
+ * To get it to it work with manual trigger we need to make sure
+ * we program the correct bit.
+ */
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+
+ // Setup manual flow control for EOF via TRIG_A
+ if (optc->funcs && optc->funcs->setup_manual_trigger)
+ optc->funcs->setup_manual_trigger(optc);
+ }
+}
+
+void optc35_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params)
+{
+ if (!optc || !params)
+ return;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t max_otg_v_total = optc1->max_v_total - 1;
+
+ if (params->vertical_total_max > 0 &&
+     params->vertical_total_min > 0) {
+
+ if (params->vertical_total_mid != 0) {
+
+ REG_SET(OTG_V_TOTAL_MID, 0,
+ OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+ REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+ OTG_VTOTAL_MID_FRAME_NUM,
+ (uint8_t)params->vertical_total_mid_frame_num);
+
+ }
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc,
+ params->vertical_total_min - 1, params->vertical_total_max - 1);
+ optc35_setup_manual_trigger(optc);
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0);
+}
+
+static void optc35_set_long_vtotal(
+ struct timing_generator *optc,
+ const struct long_vtotal_params *params)
+{
+ if (!optc || !params)
+ return;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t vcount_stop_timer = 0, vcount_stop = 0;
+ uint32_t max_otg_v_total = optc1->max_v_total - 1;
+
+ if (params->vertical_total_min <= max_otg_v_total && params->vertical_total_max <= max_otg_v_total)
+ return;
+
+ if (params->vertical_total_max == 0 || params->vertical_total_min == 0) {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ } else if (params->vertical_total_max == params->vertical_total_min) {
+ vcount_stop = params->vertical_blank_start;
+ vcount_stop_timer = params->vertical_total_max - max_otg_v_total;
+
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, max_otg_v_total, max_otg_v_total);
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer);
+ } else {
+ // Variable rate, keep DRR trigger mask
+ if (params->vertical_total_min > max_otg_v_total) {
+ // cannot be supported
+ // If MAX_OTG_V_COUNT < DRR trigger < v_total_min < v_total_max,
+ // DRR trigger will drop the vtotal counting directly to a new frame.
+ // But it should trigger between v_total_min and v_total_max.
+ ASSERT(0);
+
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0);
+ } else {
+ // For total_min <= MAX_OTG_V_COUNT and total_max > MAX_OTG_V_COUNT
+ vcount_stop = params->vertical_total_min;
+ vcount_stop_timer = params->vertical_total_max - max_otg_v_total;
+
+ // Example:
+ // params->vertical_total_min 1000
+ // params->vertical_total_max 2000
+ // MAX_OTG_V_COUNT_STOP = 1500
+ //
+ // If DRR event not happened,
+ // time 0,1,2,3,4,...1000,1001,........,1500,1501,1502, ...1999
+ // vcount 0,1,2,3,4....1000...................,1001,1002,1003,...1399
+ // vcount2 0,1,2,3,4,..499,
+ // else (DRR event happened, ex : at line 1004)
+ // time 0,1,2,3,4,...1000,1001.....1004, 0
+ // vcount 0,1,2,3,4....1000,.............. 0 (new frame)
+ // vcount2 0,1,2, 3, -
+ if (optc->funcs && optc->funcs->set_vtotal_min_max)
+ optc->funcs->set_vtotal_min_max(optc,
+ params->vertical_total_min - 1, max_otg_v_total);
+ optc35_setup_manual_trigger(optc);
+
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop);
+ REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer);
+ }
+ }
+}
+
static struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
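[Editor's note: the stop-counter arithmetic in optc35_set_long_vtotal() above can be sanity-checked outside the driver. A minimal sketch, assuming nothing beyond the math shown in the hunk; the helper name and printout are illustrative, not part of the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the "total_min <= MAX_OTG_V_COUNT < total_max" branch of
 * optc35_set_long_vtotal(): vcount pauses at vcount_stop while a second
 * counter runs for vcount_stop_timer lines, extending the frame.
 */
static void long_vtotal_example(uint32_t v_total_min, uint32_t v_total_max,
				uint32_t max_otg_v_total)
{
	assert(v_total_min <= max_otg_v_total && v_total_max > max_otg_v_total);

	uint32_t vcount_stop = v_total_min;
	uint32_t vcount_stop_timer = v_total_max - max_otg_v_total;

	printf("vcount stops at %u, second counter runs %u extra lines\n",
	       vcount_stop, vcount_stop_timer);
}

int main(void)
{
	/* Values from the worked example in the comment: 1000/2000/1500. */
	long_vtotal_example(1000, 2000, 1500);
	return 0;
}

Running it reproduces the timeline in the comment: vcount holds at line 1000 while the second counter burns the 500 lines that do not fit under the hardware cap.]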
@@ -245,7 +407,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = {
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
- .set_drr = optc31_set_drr,
+ .set_drr = optc35_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
@@ -275,6 +437,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.init_odm = optc3_init_odm,
+ .set_long_vtotal = optc35_set_long_vtotal,
};
void dcn35_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
index 1f422e4c468f..d077e2392379 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
@@ -65,10 +65,14 @@
SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_END_READBACK, mask_sh),\
SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_START_READBACK, mask_sh),\
SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\
- SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh)
+ SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\
+ SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\
+ SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh)
void dcn35_timing_generator_init(struct optc *optc1);
void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable);
+void optc35_set_drr(struct timing_generator *optc, const struct drr_params *params);
+
#endif /* __DC_OPTC_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index 184b1f23aa77..db9048974d74 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -102,10 +102,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21)
###############################################################################
-###############################################################################
-
-###############################################################################
-
RESOURCE_DCN30 = dcn30_resource.o
AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30))
@@ -202,6 +198,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351)
###############################################################################
-###############################################################################
-
endif
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index d1edac46c9a0..88afb2a30eef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -864,8 +864,6 @@ static struct clock_source *find_matching_pll(
default:
return NULL;
}
-
- return NULL;
}
static enum dc_status build_mapped_resource(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 20662edd0ae4..621825a51f46 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -1060,7 +1060,7 @@ static bool dce120_resource_construct(
struct irq_service_init_data irq_init_data;
static const struct resource_create_funcs *res_funcs;
bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
- uint32_t pipe_fuses;
+ uint32_t pipe_fuses = 0;
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 35a2cce0c2b8..56ee45e12b46 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -56,7 +56,6 @@
#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#include "dce/dce_i2c.h"
-/* TODO remove this include */
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_7_1_d.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index d08d10969251..563c5eec83ff 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -513,7 +513,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = true
+ .p010 = false
},
.max_upscale_factor = {
@@ -569,6 +569,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
+ .enable_legacy_fast_update = true,
};
static void dcn10_dpp_destroy(struct dpp **dpp)
@@ -1631,6 +1632,7 @@ static bool dcn10_resource_construct(
/* valid pipe num */
pool->base.pipe_count = j;
pool->base.timing_generator_count = j;
+ pool->base.mpcc_count = j;
/* within dml lib, it is hard code to 4. If ASIC pipe is fused,
* the value may be changed
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index a2387cea1af9..6406d31ceefe 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -62,6 +62,9 @@
#include "dcn20/dcn20_vmid.h"
#include "dce/dce_panel_cntl.h"
+#include "dcn20/dcn20_dwb.h"
+#include "dcn20/dcn20_mmhubbub.h"
+
#include "navi10_ip_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
@@ -71,9 +74,6 @@
#include "nbio/nbio_2_3_offset.h"
-#include "dcn20/dcn20_dwb.h"
-#include "dcn20/dcn20_mmhubbub.h"
-
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
@@ -83,11 +83,10 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "vm_helper.h"
-#include "link_enc_cfg.h"
-
-#include "amdgpu_socbb.h"
+#include "link_enc_cfg.h"
#include "link.h"
+
#define DC_LOGGER_INIT(logger)
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -1282,8 +1281,13 @@ void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{
+ struct resource_pool *pool = pipe_ctx->stream->ctx->dc->res_pool;
- dcn20_build_pipe_pix_clk_params(pipe_ctx);
+ if (pool->funcs->build_pipe_pix_clk_params) {
+ pool->funcs->build_pipe_pix_clk_params(pipe_ctx);
+ } else {
+ dcn20_build_pipe_pix_clk_params(pipe_ctx);
+ }
pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 914b234d7f6b..070a4efb308b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -55,7 +55,6 @@
#include "dce110/dce110_resource.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
-#include "dcn201/dcn201_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "cyan_skillfish_ip_offset.h"
@@ -182,6 +181,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
.socclk_mhz = 1254.0,
.dram_speed_mts = 14000.0,
},
+ /* state4 is not an actual state; it just marks the entry as unsupported for DML */
{
.state = 4,
.dscclk_mhz = 400.0,
@@ -566,6 +566,8 @@ static const struct resource_caps res_cap_dnc201 = {
.num_audio = 2,
.num_stream_encoder = 2,
.num_pll = 2,
+ .num_dwb = 0,
+ .num_dsc = 0,
.num_ddc = 2,
};
@@ -612,7 +614,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_tri_buf = false,
+ .enable_tri_buf = true,
.enable_legacy_fast_update = true,
.using_dml2 = false,
};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 65d337731f56..8663cbc3d1cf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -581,32 +581,6 @@ static const struct resource_caps res_cap_rn = {
.num_dsc = 3,
};
-#ifdef DIAGS_BUILD
-static const struct resource_caps res_cap_rn_FPGA_4pipe = {
- .num_timing_generator = 4,
- .num_opp = 4,
- .num_video_plane = 4,
- .num_audio = 7,
- .num_stream_encoder = 4,
- .num_pll = 4,
- .num_dwb = 1,
- .num_ddc = 4,
- .num_dsc = 0,
-};
-
-static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
- .num_timing_generator = 2,
- .num_opp = 2,
- .num_video_plane = 2,
- .num_audio = 7,
- .num_stream_encoder = 2,
- .num_pll = 4,
- .num_dwb = 1,
- .num_ddc = 4,
- .num_dsc = 2,
-};
-#endif
-
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
.per_pixel_alpha = true,
@@ -1415,16 +1389,11 @@ static bool dcn21_resource_construct(
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
- uint32_t num_pipes;
+ uint32_t num_pipes = 0;
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_rn;
-#ifdef DIAGS_BUILD
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- //pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc;
- pool->base.res_cap = &res_cap_rn_FPGA_4pipe;
-#endif
pool->base.funcs = &dcn21_res_pool_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index ecc477ef8e3b..f35cc307830b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1639,7 +1639,7 @@ noinline bool dcn30_internal_validate_bw(
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
+ int pipe_cnt, i, pipe_idx, vlevel = 0;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
ASSERT(pipes);
@@ -2050,6 +2050,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 25cd6236b054..8bc1bcaeaa47 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -1143,7 +1143,7 @@ static bool dcn303_resource_construct(
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
- struct ddc_service_init_data ddc_init_data;
+ struct ddc_service_init_data ddc_init_data = {0};
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 04d142f97474..d4c3e2754f51 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -75,7 +75,6 @@
#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"
-// TODO: change include headers /amd/include/asic_reg after upstream
#include "yellow_carp_offset.h"
#include "dcn/dcn_3_1_2_offset.h"
#include "dcn/dcn_3_1_2_sh_mask.h"
@@ -892,7 +891,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true,
.enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
+ .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
.using_dml2 = false,
};
@@ -1311,6 +1310,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@@ -1645,7 +1646,7 @@ int dcn31_populate_dml_pipes_from_context(
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
bool upscaled = false;
DC_FP_START();
@@ -1767,11 +1768,14 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
- // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg
+ // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
fast_validate = false;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index c97391edb5ff..ff50f43e4c00 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -925,27 +925,10 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = true,
+ .enable_legacy_fast_update = true,
.using_dml2 = false,
};
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true
-};
-
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1384,6 +1367,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@@ -1744,6 +1729,9 @@ bool dcn314_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
if (filter_modes_for_single_channel_workaround(dc, context))
goto validate_fail;
@@ -1938,8 +1926,6 @@ static bool dcn314_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else
- dc->debug = debug_defaults_diags;
/* Disable pipe power gating */
dc->debug.disable_dpp_power_gate = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 515ba435f759..4ce0f4bf1d9b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1309,6 +1309,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index b9753d4606f8..5fd52c5fcee4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -125,7 +125,6 @@
#include "link_enc_cfg.h"
#define DCN3_16_MAX_DET_SIZE 384
-#define DCN3_16_MIN_COMPBUF_SIZE_KB 128
#define DCN3_16_CRB_SEGMENT_SIZE_KB 64
enum dcn31_clk_src_array_id {
@@ -1306,6 +1305,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@@ -1614,7 +1615,7 @@ static int dcn316_populate_dml_pipes_from_context(
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index ce1754cc1f46..abd76345d1e4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1304,6 +1304,8 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1751,6 +1753,9 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
BW_VAL_TRACE_COUNT();
+ if (!pipes)
+ goto validate_fail;
+
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
@@ -1799,7 +1804,9 @@ bool dcn32_validate_bandwidth(struct dc *dc,
bool out = false;
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context, fast_validate);
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
else
out = dml1_validate(dc, context, fast_validate);
return out;
@@ -1815,9 +1822,48 @@ int dcn32_populate_dml_pipes_from_context(
struct pipe_ctx *pipe = NULL;
bool subvp_in_use = false;
struct dc_crtc_timing *timing;
+ int subvp_main_pipe_index = -1;
+ enum mall_stream_type mall_type;
+ bool single_display_subvp = false;
+ struct dc_stream_state *stream = NULL;
+ int num_subvp_main = 0;
+ int num_subvp_phantom = 0;
+ int num_subvp_none = 0;
+ int odm_slice_count;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ /* For single display subvp, look for subvp main so if we have phantom
+ * pipe, we can set odm policy to match main pipe
+ */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+ mall_type = dc_state_get_stream_subvp_type(context, stream);
+ if (mall_type == SUBVP_MAIN)
+ num_subvp_main++;
+ else if (mall_type == SUBVP_PHANTOM)
+ num_subvp_phantom++;
+ else
+ num_subvp_none++;
+ }
+ if (num_subvp_main == 1 && num_subvp_phantom == 1 && num_subvp_none == 0)
+ single_display_subvp = true;
+
+ if (single_display_subvp) {
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &res_ctx->pipe_ctx[i];
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ mall_type = dc_state_get_pipe_subvp_type(context, pipe);
+ if (mall_type == SUBVP_MAIN) {
+ if (resource_is_pipe_type(pipe, OTG_MASTER))
+ subvp_main_pipe_index = i;
+ }
+ pipe_cnt++;
+ }
+ }
+
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1832,7 +1878,21 @@ int dcn32_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
if (dc->config.enable_windowed_mpo_odm &&
dc->debug.enable_single_display_2to1_odm_policy) {
- switch (resource_get_odm_slice_count(pipe)) {
+ /* For single display subvp, if pipe is phantom pipe,
+ * then copy odm policy from subvp main pipe
+ */
+ mall_type = dc_state_get_pipe_subvp_type(context, pipe);
+ if (single_display_subvp && (mall_type == SUBVP_PHANTOM)) {
+ if (subvp_main_pipe_index < 0) {
+ odm_slice_count = -1;
+ ASSERT(0);
+ } else {
+ odm_slice_count = resource_get_odm_slice_count(&res_ctx->pipe_ctx[subvp_main_pipe_index]);
+ }
+ } else {
+ odm_slice_count = resource_get_odm_slice_count(pipe);
+ }
+ switch (odm_slice_count) {
case 2:
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
break;
@@ -1845,6 +1905,7 @@ int dcn32_populate_dml_pipes_from_context(
} else {
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
}
+
pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
@@ -1912,6 +1973,22 @@ int dcn32_populate_dml_pipes_from_context(
return pipe_cnt;
}
+unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes)
+{
+ uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
+
+ /* add 2 lines for worst case alignment */
+ cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
+
+ total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
+ lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
+ num_ways = cache_lines_used / lines_per_way;
+ if (cache_lines_used % lines_per_way > 0)
+ num_ways++;
+
+ return num_ways;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
.get_subvp_en = dcn32_subvp_in_use,
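[Editor's note: the rounding behaviour of dcn32_calculate_mall_ways_from_bytes() is easiest to see restated stand-alone. The cap values below (64-byte cache lines, 16 ways, a 32 MiB CAB) are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone restatement of dcn32_calculate_mall_ways_from_bytes();
 * parameters stand in for the dc->caps fields.
 */
static unsigned int mall_ways(unsigned int bytes, unsigned int line_size,
			      unsigned int cab_bytes, unsigned int num_ways)
{
	uint32_t cache_lines_used = bytes / line_size + 2; /* worst-case align */
	uint32_t total_cache_lines = cab_bytes / line_size;
	uint32_t lines_per_way = total_cache_lines / num_ways;
	uint32_t ways = cache_lines_used / lines_per_way;

	if (cache_lines_used % lines_per_way)
		ways++;	/* round up to a whole way */
	return ways;
}

int main(void)
{
	/* A 3 MiB allocation against the assumed caps needs 2 ways:
	 * 49154 lines used, 32768 lines per way.
	 */
	printf("%u way(s)\n", mall_ways(3 << 20, 64, 32 << 20, 16));
	return 0;
}
]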
@@ -1929,10 +2006,20 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct dml2_configuration_options dml2_opt = dc->dml2_options;
+
DC_FP_START();
+
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+
+ dml2_opt.use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2);
+
+ dml2_opt.use_clock_dc_limits = true;
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+
DC_FP_END();
}
@@ -1960,6 +2047,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
+ .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2048,7 +2136,8 @@ static bool dcn32_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
- dc->caps.mall_size_total = 0;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
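[Editor's note: as a worked example of the new mall_size_total line, with the 4 MB per channel above and a board reporting, say, 8 memory channels, the total becomes 4 * 8 * 1048576 = 33554432 bytes (32 MiB); previously the field was left at 0.]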
@@ -2362,30 +2451,10 @@ static bool dcn32_resource_construct(
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
-
- dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
- dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
+ dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
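[Editor's note: the body of the new resource_init_common_dml2_callbacks() helper is not shown in this hunk. A hedged sketch of its assumed shape, simply mirroring the per-ASIC assignments being removed above:

/* Assumed shape only: centralizes the DML2 callback wiring that each
 * DCN3x resource file previously did by hand.
 */
void resource_init_common_dml2_callbacks(struct dc *dc,
		struct dml2_configuration_options *dml2_options)
{
	dml2_options->callbacks.dc = dc;
	dml2_options->callbacks.build_scaling_params = &resource_build_scaling_params;
	dml2_options->callbacks.acquire_secondary_pipe_for_mpc_odm =
		&dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
	dml2_options->callbacks.update_pipes_for_stream_with_slice_count =
		&resource_update_pipes_for_stream_with_slice_count;
	dml2_options->callbacks.update_pipes_for_plane_with_slice_count =
		&resource_update_pipes_for_plane_with_slice_count;
	dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
	dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
	dml2_options->callbacks.get_opp_head = &resource_get_opp_head;

	dml2_options->svp_pstate.callbacks.dc = dc;
	dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
	dml2_options->svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
	/* ... remaining svp_pstate callbacks from the removed lines ... */
}
]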
@@ -2483,7 +2552,7 @@ struct resource_pool *dcn32_create_resource_pool(
* full update which delays the flip for 1 frame. If we use the original pipe
* we don't have to toggle its power. So we can flip faster.
*/
-static int find_optimal_free_pipe_as_secondary_dpp_pipe(
+int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
const struct resource_pool *pool,
@@ -2666,7 +2735,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);
- free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
+ free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
&cur_ctx->res_ctx, &new_ctx->res_ctx,
pool, opp_head_pipe);
if (free_pipe_idx >= 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 2258c5c7212d..fee67fbab8e2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -113,10 +113,6 @@ void dcn32_calculate_wm_and_dlg(
int pipe_cnt,
int vlevel);
-uint32_t dcn32_helper_mall_bytes_to_ways(
- struct dc *dc,
- uint32_t total_size_in_mall_bytes);
-
uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -141,6 +137,12 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
bool dcn32_is_center_timing(struct pipe_ctx *pipe);
bool dcn32_is_psr_capable(struct pipe_ctx *pipe);
+int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *new_opp_head);
+
struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
@@ -184,6 +186,8 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 296a0a8e7145..e4b360d89b3b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1288,6 +1288,8 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1579,10 +1581,20 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct dml2_configuration_options dml2_opt = dc->dml2_options;
+
DC_FP_START();
+
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+
+ dml2_opt.use_clock_dc_limits = false;
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
- dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2);
+
+ dml2_opt.use_clock_dc_limits = true;
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source)
+ dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
+
DC_FP_END();
}
@@ -1610,6 +1622,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
+ .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1697,7 +1710,9 @@ static bool dcn321_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
- dc->caps.mall_size_total = 0;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
@@ -1998,30 +2013,10 @@ static bool dcn321_resource_construct(
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
-
- dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
- dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
- dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
- dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
+ dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 5d52853cac96..2df8a742516c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -721,7 +721,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
.disable_optc_power_gate = true, /*should the same as above two*/
- .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/
+ .disable_hpo_power_gate = false, /*dmubfw force domain25 on*/
.disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
@@ -764,12 +764,12 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .minimum_z8_residency_time = 2100,
+ .minimum_z8_residency_time = 1, /* Always allow when other conditions are met */
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
.enable_legacy_fast_update = true,
- .enable_single_display_2to1_odm_policy = false,
+ .enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
.disable_boot_optimizations = false,
@@ -783,7 +783,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.psp_disabled_wa = true,
.ips2_eval_delay_us = 2000,
.ips2_entry_delay_us = 800,
- .disable_dmub_reallow_idle = true,
+ .disable_dmub_reallow_idle = false,
.static_screen_wait_frames = 2,
};
@@ -1368,6 +1368,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1734,7 +1736,9 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
{
bool out = false;
- out = dml2_validate(dc, context, fast_validate);
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
if (fast_validate)
return out;
@@ -2138,15 +2142,9 @@ static bool dcn35_resource_construct(
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
+
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index a51c4a9eaafe..f97bb4cb3761 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -240,6 +240,8 @@ struct resource_pool *dcn35_create_resource_pool(
SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst),\
SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst),\
SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst),\
+ SRI_ARR(OTG_V_COUNT_STOP_CONTROL, OTG, inst),\
+ SRI_ARR(OTG_V_COUNT_STOP_CONTROL2, OTG, inst),\
SRI_ARR(OTG_TRIGA_CNTL, OTG, inst),\
SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 5b486400dfdb..ddf9560ab772 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -700,6 +700,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
+ .disable_optc_power_gate = true, /* should be the same as the two above */
+ .disable_hpo_power_gate = true, /* dmub fw forces domain25 on */
.disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
@@ -742,12 +744,13 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+ .minimum_z8_residency_time = 2100,
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
.enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
- .disable_idle_power_optimizations = true,
+ .disable_idle_power_optimizations = false,
.dmcub_emulation = false,
.disable_boot_optimizations = false,
.disable_unbounded_requesting = false,
@@ -755,11 +758,13 @@ static const struct dc_debug_options debug_defaults_drv = {
//must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions
.enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
- .disable_z10 = true,
+ .disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
- .ips2_eval_delay_us = 200,
- .ips2_entry_delay_us = 400
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = true,
+ .static_screen_wait_frames = 2,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1343,6 +1348,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@@ -1709,19 +1716,20 @@ static bool dcn351_validate_bandwidth(struct dc *dc,
{
bool out = false;
- out = dml2_validate(dc, context, fast_validate);
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
if (fast_validate)
return out;
DC_FP_START();
- dcn351_decide_zstate_support(dc, context);
+ dcn35_decide_zstate_support(dc, context);
DC_FP_END();
return out;
}
-
static struct resource_funcs dcn351_res_pool_funcs = {
.destroy = dcn351_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1864,6 +1872,9 @@ static bool dcn351_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+ /* Use psp mailbox to enable assr */
+ dc->config.use_assr_psp_message = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -1883,6 +1894,8 @@ static bool dcn351_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
+ /* HW default is to have all FGCG enabled; SW does not need to program them */
+ dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -2113,15 +2126,9 @@ static bool dcn351_resource_construct(
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
- dc->dml2_options.callbacks.dc = dc;
- dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
- dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
- dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
- dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
- dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
- dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
- dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
+
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 7785908a6676..2fde1f043d50 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -71,6 +71,8 @@
extern "C" {
#endif
+#define DMUB_PC_SNAPSHOT_COUNT 10
+
/* Forward declarations */
struct dmub_srv;
struct dmub_srv_common_regs;
@@ -295,18 +297,30 @@ struct dmub_srv_hw_params {
bool dpia_hpd_int_enable_supported;
bool disable_clock_gate;
bool disallow_dispclk_dppclk_ds;
+ bool ips_sequential_ono;
enum dmub_memory_access_type mem_access_type;
enum dmub_ips_disable_type disable_ips;
};
/**
+ * struct dmub_srv_debug - Debug info for dmub_srv
+ * @timeout_occured: Indicates a timeout occurred on any message from driver to dmub
+ * @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored
+ * @timestamp: time when the first timeout was recorded
+ */
+struct dmub_srv_debug {
+ bool timeout_occured;
+ union dmub_rb_cmd timeout_cmd;
+ unsigned long long timestamp;
+};
+
+/**
* struct dmub_diagnostic_data - Diagnostic data retrieved from DMCUB for
* debugging purposes, including logging, crash analysis, etc.
*/
struct dmub_diagnostic_data {
uint32_t dmcub_version;
uint32_t scratch[17];
- uint32_t pc;
+ uint32_t pc[DMUB_PC_SNAPSHOT_COUNT];
uint32_t undefined_address_fault_addr;
uint32_t inst_fetch_fault_addr;
uint32_t data_write_fault_addr;
@@ -317,6 +331,7 @@ struct dmub_diagnostic_data {
uint32_t inbox0_wptr;
uint32_t inbox0_size;
uint32_t gpint_datain0;
+ struct dmub_srv_debug timeout_info;
uint8_t is_dmcub_enabled : 1;
uint8_t is_dmcub_soft_reset : 1;
uint8_t is_dmcub_secure_reset : 1;
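[Editor's note: with the program counter widened from a scalar to an array, a dump loop becomes the natural consumer. A hypothetical reader, not part of this patch, using only the fields above:

/* Hypothetical helper: print every captured DMCUB program-counter
 * snapshot; the array length comes from DMUB_PC_SNAPSHOT_COUNT above.
 */
static void dump_pc_snapshots(const struct dmub_diagnostic_data *diag)
{
	int i;

	for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
		pr_info("DMCUB PC[%d] = 0x%08x\n", i, diag->pc[i]);
}
]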
@@ -506,6 +521,7 @@ struct dmub_srv {
struct dmub_visual_confirm_color visual_confirm_color;
enum dmub_srv_power_state_type power_state;
+ struct dmub_srv_debug debug;
};
/**
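[Editor's note: a hedged sketch of how the driver side might populate the new debug state when a command times out. Only the field names come from dmub_srv_debug above; the helper and the timestamp source are assumptions:

/* Illustrative only: latch the first timed-out command into the
 * dmub_srv_debug block; later timeouts are deliberately ignored, per
 * the struct documentation.
 */
static void dmub_record_timeout(struct dmub_srv *dmub,
				const union dmub_rb_cmd *cmd,
				unsigned long long now)
{
	if (dmub->debug.timeout_occured)
		return;

	dmub->debug.timeout_occured = true;
	dmub->debug.timeout_cmd = *cmd;
	dmub->debug.timestamp = now;
}
]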
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index af3fe8bb0728..e85fd3ac52c7 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -97,6 +97,9 @@
/* Maximum number of planes on any ASIC. */
#define DMUB_MAX_PLANES 6
+/* Maximum number of phantom planes on any ASIC */
+#define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2)
+
/* Trace buffer offset for entry */
#define TRACE_BUFFER_ENTRY_OFFSET 16
@@ -194,6 +197,11 @@ union abm_flags {
* of user backlight level.
*/
unsigned int abm_gradual_bl_change : 1;
+
+ /**
+ * @abm_new_frame: Indicates if a new frame update is needed for ABM to ramp up into steady state
+ */
+ unsigned int abm_new_frame : 1;
} bitfields;
unsigned int u32All;
@@ -461,7 +469,7 @@ struct dmub_feature_caps {
* Max PSR version supported by FW.
*/
uint8_t psr;
- uint8_t fw_assisted_mclk_switch;
+ uint8_t fw_assisted_mclk_switch_ver;
uint8_t reserved[4];
uint8_t subvp_psr_support;
uint8_t gecc_enable;
@@ -619,6 +627,7 @@ enum dmub_ips_disable_type {
DMUB_IPS_DISABLE_IPS2 = 3,
DMUB_IPS_DISABLE_IPS2_Z10 = 4,
DMUB_IPS_DISABLE_DYNAMIC = 5,
+ DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6,
};
#define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -653,6 +662,7 @@ union dmub_fw_boot_options {
uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
uint32_t ips_disable: 3; /* options to disable ips support*/
+ uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */
uint32_t reserved : 9; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
@@ -695,7 +705,8 @@ union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
- uint32_t reserved_bits : 30; /**< Reversed */
+ uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */
+ uint32_t reserved_bits : 29; /**< Reserved */
} bits;
uint32_t all;
};
@@ -724,7 +735,13 @@ union dmub_shared_state_ips_driver_signals {
*/
struct dmub_shared_state_ips_fw {
union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */
- uint32_t reserved[61]; /**< Reversed, to be updated when adding new fields. */
+ uint32_t rcg_entry_count; /**< Entry counter for RCG */
+ uint32_t rcg_exit_count; /**< Exit counter for RCG */
+ uint32_t ips1_entry_count; /**< Entry counter for IPS1 */
+ uint32_t ips1_exit_count; /**< Exit counter for IPS1 */
+ uint32_t ips2_entry_count; /**< Entry counter for IPS2 */
+ uint32_t ips2_exit_count; /**< Exit counter for IPS2 */
+ uint32_t reserved[55]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
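[Editor's note: since each state exposes a matched entry/exit counter pair, a mismatch implies the firmware is currently inside that state. A minimal illustrative check, not part of the patch:

/* Illustrative: firmware is inside IPS2 whenever it has entered more
 * times than it has exited. The same test works for RCG and IPS1.
 */
static bool fw_currently_in_ips2(const struct dmub_shared_state_ips_fw *fw)
{
	return fw->ips2_entry_count != fw->ips2_exit_count;
}
]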
@@ -812,6 +829,10 @@ enum dmub_cmd_vbios_type {
*/
DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26,
/**
+ * Control PHY FSM
+ */
+ DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM = 29,
+ /**
* Controls domain power gating
*/
DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28,
@@ -1186,6 +1207,11 @@ enum dmub_cmd_type {
*/
DMUB_CMD__DPIA_HPD_INT_ENABLE = 86,
+ /**
+ * Command type used for all PSP commands.
+ */
+ DMUB_CMD__PSP = 88,
+
DMUB_CMD__VBIOS = 128,
};
@@ -1588,7 +1614,7 @@ struct dmub_rb_cmd_idle_opt_dcn_restore {
*/
struct dmub_dcn_notify_idle_cntl_data {
uint8_t driver_idle;
- uint8_t pad[1];
+ uint8_t reserved[59];
};
/**
@@ -2309,6 +2335,11 @@ enum phy_link_rate {
* UHBR10 - 20.0 Gbps/Lane
*/
PHY_RATE_2000 = 11,
+
+ /**
+ * Rate 12 - 6.75 Gbps/Lane
+ */
+ PHY_RATE_675 = 12,
};
/**
@@ -2327,6 +2358,7 @@ enum dmub_phy_fsm_state {
DMUB_PHY_FSM_POWER_DOWN,
DMUB_PHY_FSM_PLL_EN,
DMUB_PHY_FSM_TX_EN,
+ DMUB_PHY_FSM_TX_EN_TEST_MODE,
DMUB_PHY_FSM_FAST_LP,
DMUB_PHY_FSM_P2_PLL_OFF_CPM,
DMUB_PHY_FSM_P2_PLL_OFF_PG,
@@ -2931,18 +2963,49 @@ struct dmub_rb_cmd_psr_set_power_opt {
struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
};
+/**
+ * Definition of Replay Residency GPINT command.
+ * Bit[0] - Residency mode for Revision 0
+ * Bit[1] - Enable/Disable state
+ * Bit[2-3] - Revision number
+ * Bit[4-7] - Residency mode for Revision 1
+ * Bit[8] - Panel instance
+ * Bit[9-15] - Reserved
+ */
+
+enum pr_residency_mode {
+ PR_RESIDENCY_MODE_PHY = 0x0,
+ PR_RESIDENCY_MODE_ALPM,
+ PR_RESIDENCY_MODE_IPS2,
+ PR_RESIDENCY_MODE_FRAME_CNT,
+ PR_RESIDENCY_MODE_ENABLEMENT_PERIOD,
+};
+
#define REPLAY_RESIDENCY_MODE_SHIFT (0)
#define REPLAY_RESIDENCY_ENABLE_SHIFT (1)
+#define REPLAY_RESIDENCY_REVISION_SHIFT (2)
+#define REPLAY_RESIDENCY_MODE2_SHIFT (4)
#define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
-# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
-# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
-# define REPLAY_RESIDENCY_MODE_IPS 0x10
+# define REPLAY_RESIDENCY_FIELD_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+
+#define REPLAY_RESIDENCY_MODE2_MASK (0xF << REPLAY_RESIDENCY_MODE2_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE2_IPS (0x1 << REPLAY_RESIDENCY_MODE2_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE2_FRAME_CNT (0x2 << REPLAY_RESIDENCY_MODE2_SHIFT)
+# define REPLAY_RESIDENCY_FIELD_MODE2_EN_PERIOD (0x3 << REPLAY_RESIDENCY_MODE2_SHIFT)
#define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
# define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
# define REPLAY_RESIDENCY_ENABLE (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
+#define REPLAY_RESIDENCY_REVISION_MASK (0x3 << REPLAY_RESIDENCY_REVISION_SHIFT)
+# define REPLAY_RESIDENCY_REVISION_0 (0x0 << REPLAY_RESIDENCY_REVISION_SHIFT)
+# define REPLAY_RESIDENCY_REVISION_1 (0x1 << REPLAY_RESIDENCY_REVISION_SHIFT)
+
+/**
+ * Definition of a replay_state.
+ */
enum replay_state {
REPLAY_STATE_0 = 0x0,
REPLAY_STATE_1 = 0x10,
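[Editor's note: putting the Revision 1 bit layout together, a residency GPINT word can be composed purely from the masks above. An illustrative encoder; the helper itself is not part of the patch:

#include <stdint.h>

/* Compose a Revision 1 Replay residency GPINT payload: revision in
 * bits [2:3], mode2 in bits [4:7], enable in bit [1], panel instance
 * in bit [8], per the layout comment above.
 */
static uint16_t replay_residency_gpint(uint8_t panel_inst, int enable)
{
	uint16_t word = REPLAY_RESIDENCY_REVISION_1 |
			REPLAY_RESIDENCY_FIELD_MODE2_FRAME_CNT;

	word |= enable ? REPLAY_RESIDENCY_ENABLE : REPLAY_RESIDENCY_DISABLE;
	word |= (uint16_t)(panel_inst & 0x1) << 8; /* Bit[8] - panel instance */

	return word;
}
]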
@@ -3004,6 +3067,11 @@ enum dmub_cmd_replay_type {
* Set pseudo vtotal
*/
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7,
+ /**
+ * Set adaptive sync sdp enabled
+ */
+ DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
+
};
/**
@@ -3205,6 +3273,20 @@ struct dmub_cmd_replay_set_pseudo_vtotal {
*/
uint8_t pad;
};
+struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * force_disabled: set to 1 to force the adaptive sync SDP to be disabled
+ */
+ uint8_t force_disabled;
+
+ uint8_t pad[2];
+};
/**
* Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
@@ -3309,6 +3391,20 @@ struct dmub_rb_cmd_replay_set_pseudo_vtotal {
};
/**
+ * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data data;
+};
+
+/**
* Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
*/
struct dmub_cmd_replay_frameupdate_timer_data {
@@ -3363,6 +3459,11 @@ union dmub_replay_cmd_set {
* Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
*/
struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data.
+ */
+ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
+
};
/**
@@ -3445,7 +3546,7 @@ enum hw_lock_client {
/**
* Replay is the client of HW Lock Manager.
*/
- HW_LOCK_CLIENT_REPLAY = 4,
+ HW_LOCK_CLIENT_REPLAY = 4,
/**
* Invalid client.
*/
@@ -4038,6 +4139,10 @@ enum dmub_cmd_panel_cntl_type {
* Queries backlight info for the embedded panel.
*/
DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO = 1,
+ /**
+ * Sets the PWM Freq as per user's requirement.
+ */
+ DMUB_CMD__PANEL_DEBUG_PWM_FREQ = 2,
};
/**
@@ -4139,6 +4244,34 @@ struct dmub_rb_cmd_transmitter_query_dp_alt {
struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */
};
+struct phy_test_mode {
+ uint8_t mode;
+ uint8_t pat0;
+ uint8_t pad[2];
+};
+
+/**
+ * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command.
+ */
+struct dmub_rb_cmd_transmitter_set_phy_fsm_data {
+ uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
+ uint8_t mode; /**< signal mode: HDMI, DP, DP2, etc. */
+ uint8_t lane_num; /**< Number of lanes */
+ uint32_t symclk_100Hz; /**< PLL symbol clock, in units of 100 Hz */
+ struct phy_test_mode test_mode; /**< PHY test pattern configuration */
+ enum dmub_phy_fsm_state state; /**< target PHY FSM state */
+ uint32_t status; /**< status returned by FW */
+ uint8_t pad; /**< padding */
+};
+
+/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command.
+ */
+struct dmub_rb_cmd_transmitter_set_phy_fsm {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_rb_cmd_transmitter_set_phy_fsm_data data; /**< payload */
+};
+
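
A hedged sketch of driving the PHY state machine through this command; the
top-level DMUB_CMD__VBIOS type and the DMUB_PHY_FSM_POWER_DOWN enumerator are
assumptions inferred from the naming here, not definitions from this patch:

    union dmub_rb_cmd cmd = { 0 };

    cmd.set_phy_fsm.header.type = DMUB_CMD__VBIOS;
    cmd.set_phy_fsm.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM;
    cmd.set_phy_fsm.header.payload_bytes =
                    sizeof(struct dmub_rb_cmd_transmitter_set_phy_fsm_data);

    cmd.set_phy_fsm.data.phy_id = 0;              /* UNIPHYA */
    cmd.set_phy_fsm.data.lane_num = 4;            /* 4-lane link */
    cmd.set_phy_fsm.data.symclk_100Hz = 2700000;  /* 270 MHz in 100 Hz units */
    cmd.set_phy_fsm.data.state = DMUB_PHY_FSM_POWER_DOWN; /* assumed enumerator */
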
/**
* Maximum number of bytes a chunk sent to DMUB for parsing
*/
@@ -4261,6 +4394,65 @@ struct dmub_rb_cmd_secure_display {
};
/**
+ * Command type of a DMUB_CMD__PSP command
+ */
+enum dmub_cmd_psp_type {
+ DMUB_CMD__PSP_ASSR_ENABLE = 0
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__PSP_ASSR_ENABLE command.
+ */
+struct dmub_cmd_assr_enable_data {
+ /**
+ * ASSR enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * PHY port type.
+ * Indicates eDP / non-eDP port type
+ */
+ uint8_t phy_port_type;
+ /**
+ * PHY port ID.
+ */
+ uint8_t phy_port_id;
+ /**
+ * Link encoder index.
+ */
+ uint8_t link_enc_index;
+ /**
+ * HPO mode.
+ */
+ uint8_t hpo_mode;
+
+ /**
+ * Reserved field.
+ */
+ uint8_t reserved[7];
+};
+
+/**
+ * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
+ */
+struct dmub_rb_cmd_assr_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Assr data.
+ */
+ struct dmub_cmd_assr_enable_data assr_data;
+
+ /**
+ * Reserved field.
+ */
+ uint32_t reserved[3];
+};
+
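
A sketch of enabling ASSR on an eDP link with the new command; the top-level
DMUB_CMD__PSP command type and the port values are assumptions for
illustration only:

    union dmub_rb_cmd cmd = { 0 };

    cmd.assr_enable.header.type = DMUB_CMD__PSP; /* assumed top-level type */
    cmd.assr_enable.header.sub_type = DMUB_CMD__PSP_ASSR_ENABLE;
    cmd.assr_enable.header.payload_bytes =
                    sizeof(struct dmub_cmd_assr_enable_data);

    cmd.assr_enable.assr_data.enable = 1;         /* turn ASSR on */
    cmd.assr_enable.assr_data.phy_port_type = 0;  /* e.g. the eDP port type */
    cmd.assr_enable.assr_data.phy_port_id = 0;
    cmd.assr_enable.assr_data.link_enc_index = 0;
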
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -4451,6 +4643,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt;
/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command.
+ */
+ struct dmub_rb_cmd_transmitter_set_phy_fsm set_phy_fsm;
+ /**
* Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command.
*/
struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control;
@@ -4518,6 +4714,15 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
*/
struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+ struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp;
+ /**
+ * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
+ */
+ struct dmub_rb_cmd_assr_enable assr_enable;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index cae96fba6349..e500ca9ae09c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -472,4 +472,5 @@ void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
+ diag_data->timeout_info = dmub->debug;
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 2bcf5fb87dd9..662c34e9495c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -466,6 +466,7 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
+ diag_data->timeout_info = dmub->debug;
}
bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 0d521eeda050..e1da270502cc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -478,6 +478,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->is_cw6_enabled = is_cw6_enabled;
diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
+
+ diag_data->timeout_info = dmub->debug;
}
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 53f359f3fae2..70e63aeb8f89 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -420,6 +420,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds;
boot_options.bits.disable_clk_gate = params->disable_clock_gate;
boot_options.bits.ips_disable = params->disable_ips;
+ boot_options.bits.ips_sequential_ono = params->ips_sequential_ono;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -516,6 +517,7 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->is_cw6_enabled = is_cw6_enabled;
diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
+ diag_data->timeout_info = dmub->debug;
}
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 1c6f24cb1d2f..447768dec887 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -27,7 +27,6 @@
#define __DAL_TYPES_H__
#include "signal_types.h"
-#include "dc_types.h"
struct dal_logger;
struct dc_bios;
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index c6bbd262f1ac..08ee0350b31f 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -226,8 +226,8 @@ enum dp_alt_mode {
struct graphics_object_id {
uint32_t id:8;
- uint32_t enum_id:4;
- uint32_t type:4;
+ enum object_enum_id enum_id;
+ enum object_type type;
uint32_t reserved:16; /* for padding. total size should be u32 */
};
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 92dbff22a7c6..1867aac57cf2 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -73,7 +73,6 @@ struct link_training_settings {
enum dc_pre_emphasis *pre_emphasis;
enum dc_post_cursor2 *post_cursor2;
bool should_set_fec_ready;
- /* TODO - factor lane_settings out because it changes during LT */
union dc_dp_ffe_preset *ffe_preset;
uint16_t cr_pattern_time;
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index f39e2785e618..83479951732a 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -64,6 +64,7 @@
#define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__)
+#define DC_LOG_IPS(...) pr_debug("[IPS]: "__VA_ARGS__)
struct dc_log_buffer_ctx {
char *buf;
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index 1b14b17a79c7..a10d6b988aab 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -118,6 +118,19 @@ static inline bool dc_is_dvi_signal(enum signal_type signal)
}
}
+static inline bool dc_is_tmds_signal(enum signal_type signal)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline bool dc_is_dvi_single_link_signal(enum signal_type signal)
{
return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 8b5c27857671..3699e633801d 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1059,7 +1059,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
struct fixed31_32 min_display;
struct fixed31_32 max_content;
struct fixed31_32 clip = dc_fixpt_one;
- struct fixed31_32 output;
+ struct fixed31_32 output = dc_fixpt_zero;
bool use_eetf = false;
bool is_clipped = false;
struct fixed31_32 sdr_white_level;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 3955b7e4b2e2..d09627c15b9c 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -158,13 +158,13 @@ static unsigned int calc_v_total_from_duration(
if (duration_in_us > vrr->max_duration_in_us)
duration_in_us = vrr->max_duration_in_us;
- if (dc_is_hdmi_signal(stream->signal)) {
+ if (dc_is_hdmi_signal(stream->signal)) { /* HDMI: round up to comply with the spec */
uint32_t h_total_up_scaled;
h_total_up_scaled = stream->timing.h_total * 10000;
v_total = div_u64((unsigned long long)duration_in_us
* stream->timing.pix_clk_100hz + (h_total_up_scaled - 1),
- h_total_up_scaled);
+ h_total_up_scaled); /* ceiling division for the MVRR M_max/M_min values */
} else {
v_total = div64_u64(div64_u64(((unsigned long long)(
duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
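
The (h_total_up_scaled - 1) term above turns the truncating division into a
ceiling division via the identity ceil(a / b) = (a + b - 1) / b for b > 0, so
the computed v_total never yields a frame shorter than the requested duration.
The same step in isolation, as a sketch:

    /* Ceiling division as used for the HDMI v_total computation above. */
    static inline u64 ceil_div64(u64 a, u64 b)
    {
            return div64_u64(a + b - 1, b); /* == ceil(a / b) for b > 0 */
    }
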
@@ -1057,7 +1057,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->fixed_refresh_in_uhz = 0;
refresh_range = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) -
-+ div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000);
+ div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000);
in_out_vrr->supported = true;
}
@@ -1126,6 +1126,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->adjust.v_total_min = stream->timing.v_total;
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
}
+
+ in_out_vrr->adjust.allow_otg_v_count_halt = (in_config->state == VRR_STATE_ACTIVE_FIXED);
}
void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 733f22bed021..c996365e84b0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -151,7 +151,7 @@ out:
static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
{
- enum mod_hdcp_status status;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE;
uint8_t size;
uint16_t max_wait = 20; // units of ms
uint16_t num_polls = 5;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index f7b5583ee609..8e9caae7c955 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -156,6 +156,10 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
uint32_t cur_size = 0;
uint32_t data_offset = 0;
+ if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID)
+ return MOD_HDCP_STATUS_DDC_FAILURE;
+
if (is_dp_hdcp(hdcp)) {
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
@@ -215,6 +219,10 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
uint32_t cur_size = 0;
uint32_t data_offset = 0;
+ if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID)
+ return MOD_HDCP_STATUS_DDC_FAILURE;
+
if (is_dp_hdcp(hdcp)) {
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 738ee763f24a..a344e2e49b0e 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
- if (stream->link->psr_settings.psr_feature_enabled) {
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- vsc_packet_revision = vsc_packet_rev2;
- }
-
- if (stream->link->replay_settings.config.replay_supported)
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->replay_settings.config.replay_supported)
vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
if (stream->use_vsc_sdp_for_colorimetry)
@@ -539,8 +536,6 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet);
break;
case FREESYNC_TYPE_PCON_IN_WHITELIST:
- mod_build_adaptive_sync_infopacket_v1(info_packet);
- break;
case ADAPTIVE_SYNC_TYPE_EDP:
mod_build_adaptive_sync_infopacket_v1(info_packet);
break;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index b0a6256e89f4..7536c173a546 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -24,6 +24,7 @@
#define __AMD_SHARED_H__
#include <drm/amd_asic_type.h>
+#include <drm/drm_print.h>
#define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */
@@ -321,6 +322,8 @@ struct amd_ip_funcs {
int (*set_powergating_state)(void *handle,
enum amd_powergating_state state);
void (*get_clockgating_state)(void *handle, u64 *flags);
+ void (*dump_ip_state)(void *handle);
+ void (*print_ip_state)(void *handle, struct drm_printer *p);
};
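
A minimal sketch of what an IP block might wire into the two new hooks; the
register name and the amdgpu_device field below are hypothetical and only
illustrate the intended split between capturing state at hang time and
printing it later through a drm_printer:

    static void example_ip_dump_state(void *handle)
    {
            struct amdgpu_device *adev = handle;

            /* Capture registers while the hang context is still live. */
            adev->example_ip_saved_status = RREG32(mmEXAMPLE_IP_STATUS);
    }

    static void example_ip_print_state(void *handle, struct drm_printer *p)
    {
            struct amdgpu_device *adev = handle;

            /* Emit the previously captured snapshot. */
            drm_printf(p, "EXAMPLE_IP_STATUS: 0x%08x\n",
                       adev->example_ip_saved_status);
    }
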
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
index f2f8f9b39c6b..fc72c2267060 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
@@ -311,6 +311,10 @@
#define mmPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
#define mmPHYFSYMCLK_CLOCK_CNTL 0x0057
#define mmPHYFSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
+#define mmHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define mmHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
@@ -4513,6 +4517,10 @@
#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18
#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5201,6 +5209,10 @@
#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83
#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5888,6 +5900,10 @@
#define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10ee
#define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_INDEX 0x10ef
+#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_DATA 0x10f0
+#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -6576,6 +6592,10 @@
#define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1259
#define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_INDEX 0x125a
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_DATA 0x125b
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -7264,6 +7284,10 @@
#define mmCM4_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM4_CM_3DLUT_OUT_OFFSET_B 0x13c4
#define mmCM4_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM4_CM_TEST_DEBUG_INDEX 0x13c5
+#define mmCM4_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM4_CM_TEST_DEBUG_DATA 0x13c6
+#define mmCM4_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp4_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -7952,6 +7976,10 @@
#define mmCM5_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM5_CM_3DLUT_OUT_OFFSET_B 0x152f
#define mmCM5_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM5_CM_TEST_DEBUG_INDEX 0x1530
+#define mmCM5_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM5_CM_TEST_DEBUG_DATA 0x1531
+#define mmCM5_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp5_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
index e0a447351623..daf71e82f0ba 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
@@ -1189,6 +1189,11 @@
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL__SHIFT 0x4
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_EN_MASK 0x00000001L
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL_MASK 0x00000010L
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
@@ -16739,6 +16744,15 @@
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
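
These form a conventional index/data pair: software writes a selector into
CM_TEST_DEBUG_INDEX and reads the selected internal value back through
CM_TEST_DEBUG_DATA. A sketch using the masks above, with REG_WRITE/REG_READ
standing in for whatever register accessors the caller already has:

    static uint32_t cm0_read_debug_bus(uint32_t index)
    {
            uint32_t sel = (index << CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT) &
                           CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK;

            REG_WRITE(mmCM0_CM_TEST_DEBUG_INDEX, sel); /* select a debug bus entry */
            return REG_READ(mmCM0_CM_TEST_DEBUG_DATA); /* read the latched value */
    }
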
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
//DC_PERFMON12_PERFCOUNTER_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h
index b45a35aae241..bf84f97d9162 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h
@@ -4466,6 +4466,10 @@
#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18
#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5154,6 +5158,10 @@
#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83
#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5841,6 +5849,10 @@
#define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10ee
#define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_INDEX 0x10ef
+#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_DATA 0x10f0
+#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -6529,6 +6541,10 @@
#define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1259
#define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_INDEX 0x125a
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_DATA 0x125b
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -7217,6 +7233,10 @@
#define mmCM4_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM4_CM_3DLUT_OUT_OFFSET_B 0x13c4
#define mmCM4_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM4_CM_TEST_DEBUG_INDEX 0x13c5
+#define mmCM4_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM4_CM_TEST_DEBUG_DATA 0x13c6
+#define mmCM4_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp4_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h
index 3dae29f9581e..56cdb219874a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h
@@ -15676,6 +15676,14 @@
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h
index daa8130636f0..8b0d2638a6b0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h
@@ -3110,6 +3110,10 @@
#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18
#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -3798,6 +3802,10 @@
#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83
#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5687,6 +5695,16 @@
#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
#define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA1 0x303c
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA2 0x303d
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA3 0x303e
+#define mmDSCC0_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
@@ -5817,6 +5835,16 @@
#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
#define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA1 0x3098
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA2 0x3099
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA3 0x309a
+#define mmDSCC1_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h
index 5c469cf635e5..53f1705f8d99 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h
@@ -10701,6 +10701,13 @@
#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+
//CM0_CM_SHAPER_CONTROL
#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_MODE_CURRENT__SHIFT 0x2
@@ -22258,7 +22265,9 @@
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
//DSC_TOP0_DSC_DEBUG_CONTROL
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
@@ -22631,6 +22640,15 @@
//DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
index f268d33c4744..7fd906f10803 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
@@ -424,6 +424,8 @@
#define regDTBCLK_DTO2_MODULO_BASE_IDX 2
#define regDTBCLK_DTO3_MODULO 0x0022
#define regDTBCLK_DTO3_MODULO_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
#define regPHYASYMCLK_CLOCK_CNTL 0x0052
#define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYBSYMCLK_CLOCK_CNTL 0x0053
@@ -434,6 +436,8 @@
#define regPHYDSYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYESYMCLK_CLOCK_CNTL 0x0056
#define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regHDMISTREAMCLK_CNTL 0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX 2
#define regDCCG_GATE_DISABLE_CNTL3 0x005a
#define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2
#define regHDMISTREAMCLK0_DTO_PARAM 0x005b
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
index cf3398f15666..07fbfafe6056 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
@@ -1372,6 +1372,11 @@
//DTBCLK_DTO3_MODULO
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
//PHYASYMCLK_CLOCK_CNTL
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4
@@ -1397,6 +1402,13 @@
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL__SHIFT 0x4
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_EN_MASK 0x00000001L
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL_MASK 0x00000030L
+//HDMISTREAMCLK_CNTL
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN__SHIFT 0x3
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x4
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000007L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN_MASK 0x00000008L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00000010L
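
As throughout these generated headers, a field is decoded by masking and then
shifting; for example, extracting the stream clock source select (reg_val
standing for a raw read of HDMISTREAMCLK_CNTL):

    static uint32_t hdmistreamclk0_src_sel(uint32_t reg_val)
    {
            return (reg_val & HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK) >>
                   HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT;
    }
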
//DCCG_GATE_DISABLE_CNTL3
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1
@@ -46978,6 +46990,13 @@
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
//DSC_TOP0_DSC_DEBUG_CONTROL
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h
index 50c34d88c17c..16a69d17bb1e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h
@@ -213,6 +213,8 @@
#define regDTBCLK_DTO2_MODULO_BASE_IDX 2
#define regDTBCLK_DTO3_MODULO 0x0022
#define regDTBCLK_DTO3_MODULO_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
#define regPHYASYMCLK_CLOCK_CNTL 0x0052
#define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYBSYMCLK_CLOCK_CNTL 0x0053
@@ -233,6 +235,8 @@
#define regDCCG_AUDIO_DTBCLK_DTO_MODULO_BASE_IDX 2
#define regDTBCLK_DTO_DBUF_EN 0x0063
#define regDTBCLK_DTO_DBUF_EN_BASE_IDX 2
+#define regHDMISTREAMCLK_CNTL 0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX 2
// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h
index 295e0dac9ffa..6473362e39a8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h
@@ -886,6 +886,11 @@
//DTBCLK_DTO3_MODULO
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
//PHYASYMCLK_CLOCK_CNTL
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4
@@ -911,6 +916,11 @@
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL__SHIFT 0x4
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_EN_MASK 0x00000001L
#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL_MASK 0x00000030L
+//HDMISTREAMCLK_CNTL
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x10
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000003L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00010000L
//DCCG_GATE_DISABLE_CNTL3
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h
index 14c29ce4c7b3..78cb61d5800a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h
@@ -1719,6 +1719,10 @@
#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
#define regFMON_CTRL 0x0541
#define regFMON_CTRL_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_INDEX 0x0542
+#define regDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_DATA 0x0543
+#define regDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dchubbubl_hubbub_sdpif_dispdec
@@ -3574,6 +3578,10 @@
#define regCM0_CM_DEALPHA_BASE_IDX 2
#define regCM0_CM_COEF_FORMAT 0x0d8c
#define regCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dpp0_dispdec_dpp_top_dispdec
@@ -3960,6 +3968,10 @@
#define regCM1_CM_DEALPHA_BASE_IDX 2
#define regCM1_CM_COEF_FORMAT 0x0ef7
#define regCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dpp1_dispdec_dpp_top_dispdec
@@ -4346,6 +4358,10 @@
#define regCM2_CM_DEALPHA_BASE_IDX 2
#define regCM2_CM_COEF_FORMAT 0x1062
#define regCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_INDEX 0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_DATA 0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dpp2_dispdec_dpp_top_dispdec
@@ -4732,6 +4748,10 @@
#define regCM3_CM_DEALPHA_BASE_IDX 2
#define regCM3_CM_COEF_FORMAT 0x11cd
#define regCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_DATA 0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dcn_dc_dpp3_dispdec_dpp_top_dispdec
@@ -11780,6 +11800,16 @@
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b
+#define regDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA1 0x303c
+#define regDSCC0_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA2 0x303d
+#define regDSCC0_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA3 0x303e
+#define regDSCC0_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dcn_dc_dsc0_dispdec_dsccif_dispdec
@@ -11888,6 +11918,16 @@
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097
+#define regDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA1 0x3098
+#define regDSCC1_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA2 0x3099
+#define regDSCC1_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA3 0x309a
+#define regDSCC1_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dcn_dc_dsc1_dispdec_dsccif_dispdec
@@ -11996,6 +12036,16 @@
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA0 0x30f3
+#define regDSCC2_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA1 0x30f4
+#define regDSCC2_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA2 0x30f5
+#define regDSCC2_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA3 0x30f6
+#define regDSCC2_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dcn_dc_dsc2_dispdec_dsccif_dispdec
@@ -12104,6 +12154,16 @@
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA0 0x314f
+#define regDSCC3_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA1 0x3150
+#define regDSCC3_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA2 0x3151
+#define regDSCC3_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA3 0x3152
+#define regDSCC3_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2
// addressBlock: dcn_dc_dsc3_dispdec_dsccif_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h
index 0691e328d0f0..1093105ca35b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h
@@ -11544,6 +11544,11 @@
#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
// addressBlock: dcn_dc_dpp0_dispdec_dpp_top_dispdec
@@ -42267,6 +42272,18 @@
//DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_TEST_DEBUG_INDEX2
+#define DSCC0_DSCC_TEST_DEBUG_INDEX2__DSCC_TEST_DEBUG_INDEX2__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_INDEX2__DSCC_TEST_DEBUG_INDEX2_MASK 0x000000FFL
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
// addressBlock: dcn_dc_dsc0_dispdec_dsccif_dispdec
@@ -42300,6 +42317,16 @@
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
//DSC_TOP0_DSC_DEBUG_CONTROL
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h
index 3bd8792fd7b3..a04b8c32c564 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h
@@ -1719,6 +1719,10 @@
#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
#define regFMON_CTRL 0x0541
#define regFMON_CTRL_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_INDEX 0x0542
+#define regDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_DATA 0x0543
+#define regDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dchubbubl_hubbub_sdpif_dispdec
@@ -3573,6 +3577,10 @@
#define regCM0_CM_DEALPHA_BASE_IDX 2
#define regCM0_CM_COEF_FORMAT 0x0d8c
#define regCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
@@ -3959,6 +3967,10 @@
#define regCM1_CM_DEALPHA_BASE_IDX 2
#define regCM1_CM_COEF_FORMAT 0x0ef7
#define regCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
@@ -4345,6 +4357,10 @@
#define regCM2_CM_DEALPHA_BASE_IDX 2
#define regCM2_CM_COEF_FORMAT 0x1062
#define regCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_INDEX 0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_DATA 0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
@@ -4731,6 +4747,10 @@
#define regCM3_CM_DEALPHA_BASE_IDX 2
#define regCM3_CM_COEF_FORMAT 0x11cd
#define regCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_DATA 0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
@@ -11789,6 +11809,10 @@
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b
+#define regDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
@@ -11897,6 +11921,10 @@
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097
+#define regDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
@@ -12005,7 +12033,10 @@
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
-
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_DATA0 0x30f3
+#define regDSCC2_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
// base address: 0x2e0
@@ -12113,6 +12144,10 @@
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_DATA0 0x314f
+#define regDSCC3_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2
// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h
index e82dffc2b9b0..ce773fca621f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h
@@ -11547,6 +11547,11 @@
#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
@@ -42315,6 +42320,15 @@
//DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
@@ -42348,7 +42362,9 @@
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
//DSC_TOP0_DSC_DEBUG_CONTROL
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
// addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h
index 0bb47e06eee8..081e726afbf0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h
@@ -24,6 +24,8 @@
#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d
#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x292e
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
@@ -50,6 +52,8 @@
#define mmRDPCSTX0_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c
#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
@@ -120,6 +124,8 @@
#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05
#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x2a06
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
@@ -146,6 +152,8 @@
#define mmRDPCSTX1_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14
#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
@@ -216,6 +224,8 @@
#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x2add
#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x2ade
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
@@ -242,6 +252,8 @@
#define mmRDPCSTX2_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec
#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0
#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1
@@ -312,6 +324,8 @@
#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x2bb5
#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x2bb6
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
@@ -338,6 +352,8 @@
#define mmRDPCSTX3_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4
#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8
#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9
@@ -408,6 +424,8 @@
#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x2c8d
#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x2c8e
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
@@ -434,6 +452,8 @@
#define mmRDPCSTX4_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c
#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0
#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1
@@ -504,6 +524,8 @@
#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA 0x2d65
#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG 0x2d66
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
@@ -530,6 +552,8 @@
#define mmRDPCSTX5_RDPCSTX_CNTL2_BASE_IDX 2
#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2d74
#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG 0x2d75
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0 0x2d78
#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1 0x2d79
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h
index 23fa1121a967..1f846fa6c1a2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h
@@ -70,7 +70,9 @@
//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
-
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
//RDPCSTX0_RDPCSTX_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h
index 55743d06f728..e55ff0e8d74c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h
@@ -70,7 +70,9 @@
//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
-
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
//RDPCSTX0_RDPCSTX_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
index 01a56556cde1..5b4fdeda1040 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
@@ -155,6 +155,8 @@
#define regRDPCSTX0_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c
#define regRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d
+#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
#define regRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
@@ -239,6 +241,8 @@
#define regRDPCSTX1_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14
#define regRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15
+#define regRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
#define regRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
@@ -323,6 +327,8 @@
#define regRDPCSTX2_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec
#define regRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed
+#define regRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0
#define regRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1
@@ -407,6 +413,8 @@
#define regRDPCSTX3_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4
#define regRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5
+#define regRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8
#define regRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9
@@ -491,6 +499,8 @@
#define regRDPCSTX4_RDPCSTX_CNTL2_BASE_IDX 2
#define regRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c
#define regRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define regRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d
+#define regRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
#define regRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0
#define regRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2
#define regRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1
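
The reg*/BASE_IDX pairs in these offset headers are not absolute addresses: each offset is a DWORD offset relative to a per-IP segment base, and BASE_IDX selects which segment base to add. A sketch of that arithmetic with a made-up base table (real segment bases come from IP discovery at driver init and differ per ASIC; in the kernel the lookup is wrapped by the SOC15_REG_OFFSET() macro):

#include <stdint.h>
#include <stdio.h>

#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d
#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2

/* Made-up per-IP segment bases (DWORD units), standing in for the table
 * the driver fills in from IP discovery. */
static const uint32_t dcn_base[] = { 0x000012f0, 0x00000010, 0x000034c0 };

/* BASE_IDX picks a segment base; the register offset is added to it. */
static uint32_t reg_dword_offset(uint32_t reg, uint32_t base_idx)
{
	return dcn_base[base_idx] + reg;
}

int main(void)
{
	printf("RDPCSTX0_RDPCSTX_DEBUG_CONFIG at dword 0x%05x\n",
	       reg_dword_offset(regRDPCSTX0_RDPCSTX_DEBUG_CONFIG,
				regRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX));
	return 0;
}
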
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
index 4908044f7409..4c8e7fdb6976 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
@@ -4830,6 +4830,8 @@
#define mmCP_ECC_FIRSTOCCURRENCE_RING2_BASE_IDX 0
#define mmGB_EDC_MODE 0x1e1e
#define mmGB_EDC_MODE_BASE_IDX 0
+#define mmCP_DEBUG 0x1e1f
+#define mmCP_DEBUG_BASE_IDX 0
#define mmCP_FETCHER_SOURCE 0x1e22
#define mmCP_FETCHER_SOURCE_BASE_IDX 0
#define mmCP_PQ_WPTR_POLL_CNTL 0x1e23
@@ -7778,6 +7780,8 @@
#define mmCP_MES_DOORBELL_CONTROL5_BASE_IDX 1
#define mmCP_MES_DOORBELL_CONTROL6 0x2841
#define mmCP_MES_DOORBELL_CONTROL6_BASE_IDX 1
+#define mmCP_MES_DEBUG_INTERRUPT_INSTR_PNTR 0x2842
+#define mmCP_MES_DEBUG_INTERRUPT_INSTR_PNTR_BASE_IDX 1
#define mmCP_MES_GP0_LO 0x2843
#define mmCP_MES_GP0_LO_BASE_IDX 1
#define mmCP_MES_GP0_HI 0x2844
@@ -9332,10 +9336,16 @@
#define mmRLC_LB_CNTR_INIT_1_BASE_IDX 1
#define mmRLC_LB_CNTR_1 0x4c1c
#define mmRLC_LB_CNTR_1_BASE_IDX 1
+#define mmRLC_GPM_DEBUG_INST_ADDR 0x4c1d
+#define mmRLC_GPM_DEBUG_INST_ADDR_BASE_IDX 1
#define mmRLC_JUMP_TABLE_RESTORE 0x4c1e
#define mmRLC_JUMP_TABLE_RESTORE_BASE_IDX 1
#define mmRLC_PG_DELAY_2 0x4c1f
#define mmRLC_PG_DELAY_2_BASE_IDX 1
+#define mmRLC_GPM_DEBUG_INST_A 0x4c22
+#define mmRLC_GPM_DEBUG_INST_A_BASE_IDX 1
+#define mmRLC_GPM_DEBUG_INST_B 0x4c23
+#define mmRLC_GPM_DEBUG_INST_B_BASE_IDX 1
#define mmRLC_GPU_CLOCK_COUNT_LSB 0x4c24
#define mmRLC_GPU_CLOCK_COUNT_LSB_BASE_IDX 1
#define mmRLC_GPU_CLOCK_COUNT_MSB 0x4c25
@@ -9720,6 +9730,8 @@
#define mmRLC_SPM_THREAD_TRACE_CTRL_BASE_IDX 1
#define mmRLC_LB_CNTR_2 0x4de7
#define mmRLC_LB_CNTR_2_BASE_IDX 1
+#define mmRLC_LX6_CORE_PDEBUG_INST 0x4deb
+#define mmRLC_LX6_CORE_PDEBUG_INST_BASE_IDX 1
#define mmRLC_CPAXI_DOORBELL_MON_CTRL 0x4df1
#define mmRLC_CPAXI_DOORBELL_MON_CTRL_BASE_IDX 1
#define mmRLC_CPAXI_DOORBELL_MON_STAT 0x4df2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index efc16ddf274a..2dfa0e5b1aa3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -6822,6 +6822,8 @@
#define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
#define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
#define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define VM_L2_PROTECTION_FAULT_STATUS__UCE__SHIFT 0x1d
+#define VM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e
#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
@@ -6832,6 +6834,8 @@
#define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
#define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
#define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK 0x20000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L
//VM_L2_PROTECTION_FAULT_ADDR_LO32
#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
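
The two new single-bit fields widen the GC 9.0 VM L2 fault status; going by the field names, UCE flags an uncorrectable error and FED a fatal error detected, which fits amdgpu's RAS terminology. A standalone sketch of testing them in a raw status word (a real fault handler would read the register and route these into the RAS path):

#include <stdint.h>
#include <stdio.h>

#define VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK 0x20000000L
#define VM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L

static void decode_fault_status(uint32_t status)
{
	if (status & VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK)
		printf("fault status reports an uncorrectable error (UCE)\n");
	if (status & VM_L2_PROTECTION_FAULT_STATUS__FED_MASK)
		printf("fault status reports a fatal error detected (FED)\n");
}

int main(void)
{
	decode_fault_status(0x60000000); /* sample value with both bits set */
	return 0;
}
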
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h
index 8b931bbabe70..969e006b859b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h
@@ -237,6 +237,10 @@
#define regSEM_REGISTER_LAST_PART2_BASE_IDX 0
#define regIH_CLIENT_CFG 0x0184
#define regIH_CLIENT_CFG_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_INDEX 0x0185
+#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_DATA 0x0186
+#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0
#define regIH_CLIENT_CFG_INDEX 0x0188
#define regIH_CLIENT_CFG_INDEX_BASE_IDX 0
#define regIH_CLIENT_CFG_DATA 0x0189
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h
index f262f44fa68c..a672a91e58f0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h
@@ -888,6 +888,16 @@
//IH_CLIENT_CFG
#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000003FL
+//IH_RING1_CLIENT_CFG_INDEX
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L
+//IH_RING1_CLIENT_CFG_DATA
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L
//IH_CLIENT_CFG_INDEX
#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
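
The new IH_RING1_CLIENT_CFG_INDEX/DATA pair is an index/data window: INDEX is 3 bits wide, so eight entries, and each DATA entry names a client/source ID to steer to IH ring 1. A sketch of the likely programming sequence, with a printing stub in place of a real MMIO write and made-up IDs:

#include <stdint.h>
#include <stdio.h>

#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L
#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0
#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8
#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL
#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L
#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L

/* Stub MMIO write; a real driver writes through its register BAR. */
static void wreg(const char *name, uint32_t val)
{
	printf("%-27s <= 0x%08x\n", name, val);
}

/* Select a slot (0..7) via INDEX, then program the routing entry in DATA. */
static void ih_ring1_route(uint32_t slot, uint32_t client, uint32_t source)
{
	uint32_t data = 0;

	data |= (client << IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT) &
		IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK;
	data |= (source << IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT) &
		IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK;
	data |= IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK;

	wreg("IH_RING1_CLIENT_CFG_INDEX",
	     slot & IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK);
	wreg("IH_RING1_CLIENT_CFG_DATA", data);
}

int main(void)
{
	ih_ring1_route(0, 0x12, 0x34); /* made-up client/source IDs */
	return 0;
}
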
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h
new file mode 100644
index 000000000000..da7e31fedd58
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _smuio_14_0_2_OFFSET_HEADER
+#define _smuio_14_0_2_OFFSET_HEADER
+
+
+
+// addressBlock: smuio_smuio_tsc_SmuSmuioDec
+// base address: 0x5a8a0
+#define regPWROK_REFCLK_GAP_CYCLES 0x0028
+#define regPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1
+#define regGOLDEN_TSC_INCREMENT_UPPER 0x002b
+#define regGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1
+#define regGOLDEN_TSC_INCREMENT_LOWER 0x002c
+#define regGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1
+#define regGOLDEN_TSC_COUNT_UPPER 0x002d
+#define regGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1
+#define regGOLDEN_TSC_COUNT_LOWER 0x002e
+#define regGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1
+#define regSOC_GOLDEN_TSC_SHADOW_UPPER 0x002f
+#define regSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1
+#define regSOC_GOLDEN_TSC_SHADOW_LOWER 0x0030
+#define regSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1
+#define regSOC_GAP_PWROK 0x0031
+#define regSOC_GAP_PWROK_BASE_IDX 1
+
+
+// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
+// base address: 0x5aca8
+#define regPWR_VIRT_RESET_REQ 0x012a
+#define regPWR_VIRT_RESET_REQ_BASE_IDX 1
+#define regPWR_DISP_TIMER_CONTROL 0x012b
+#define regPWR_DISP_TIMER_CONTROL_BASE_IDX 1
+#define regPWR_DISP_TIMER_DEBUG 0x012c
+#define regPWR_DISP_TIMER_DEBUG_BASE_IDX 1
+#define regPWR_DISP_TIMER2_CONTROL 0x012d
+#define regPWR_DISP_TIMER2_CONTROL_BASE_IDX 1
+#define regPWR_DISP_TIMER2_DEBUG 0x012e
+#define regPWR_DISP_TIMER2_DEBUG_BASE_IDX 1
+#define regPWR_DISP_TIMER_GLOBAL_CONTROL 0x012f
+#define regPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1
+#define regPWR_IH_CONTROL 0x0130
+#define regPWR_IH_CONTROL_BASE_IDX 1
+
+
+// addressBlock: smuio_smuio_misc_SmuSmuioDec
+// base address: 0x5a000
+#define regSMUIO_MCM_CONFIG 0x0023
+#define regSMUIO_MCM_CONFIG_BASE_IDX 0
+#define regIP_DISCOVERY_VERSION 0x0000
+#define regIP_DISCOVERY_VERSION_BASE_IDX 1
+#define regSCRATCH_REGISTER0 0x01bd
+#define regSCRATCH_REGISTER0_BASE_IDX 1
+#define regSCRATCH_REGISTER1 0x01be
+#define regSCRATCH_REGISTER1_BASE_IDX 1
+#define regSCRATCH_REGISTER2 0x01bf
+#define regSCRATCH_REGISTER2_BASE_IDX 1
+#define regSCRATCH_REGISTER3 0x01c0
+#define regSCRATCH_REGISTER3_BASE_IDX 1
+#define regSCRATCH_REGISTER4 0x01c1
+#define regSCRATCH_REGISTER4_BASE_IDX 1
+#define regSCRATCH_REGISTER5 0x01c2
+#define regSCRATCH_REGISTER5_BASE_IDX 1
+#define regSCRATCH_REGISTER6 0x01c3
+#define regSCRATCH_REGISTER6_BASE_IDX 1
+#define regSCRATCH_REGISTER7 0x01c4
+#define regSCRATCH_REGISTER7_BASE_IDX 1
+
+
+// addressBlock: smuio_smuio_i2c_SmuSmuioDec
+// base address: 0x5a100
+#define regCKSVII2C_IC_CON 0x0040
+#define regCKSVII2C_IC_CON_BASE_IDX 0
+#define regCKSVII2C_IC_TAR 0x0041
+#define regCKSVII2C_IC_TAR_BASE_IDX 0
+#define regCKSVII2C_IC_SAR 0x0042
+#define regCKSVII2C_IC_SAR_BASE_IDX 0
+#define regCKSVII2C_IC_HS_MADDR 0x0043
+#define regCKSVII2C_IC_HS_MADDR_BASE_IDX 0
+#define regCKSVII2C_IC_DATA_CMD 0x0044
+#define regCKSVII2C_IC_DATA_CMD_BASE_IDX 0
+#define regCKSVII2C_IC_SS_SCL_HCNT 0x0045
+#define regCKSVII2C_IC_SS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C_IC_SS_SCL_LCNT 0x0046
+#define regCKSVII2C_IC_SS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C_IC_FS_SCL_HCNT 0x0047
+#define regCKSVII2C_IC_FS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C_IC_FS_SCL_LCNT 0x0048
+#define regCKSVII2C_IC_FS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C_IC_HS_SCL_HCNT 0x0049
+#define regCKSVII2C_IC_HS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C_IC_HS_SCL_LCNT 0x004a
+#define regCKSVII2C_IC_HS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C_IC_INTR_STAT 0x004b
+#define regCKSVII2C_IC_INTR_STAT_BASE_IDX 0
+#define regCKSVII2C_IC_INTR_MASK 0x004c
+#define regCKSVII2C_IC_INTR_MASK_BASE_IDX 0
+#define regCKSVII2C_IC_RAW_INTR_STAT 0x004d
+#define regCKSVII2C_IC_RAW_INTR_STAT_BASE_IDX 0
+#define regCKSVII2C_IC_RX_TL 0x004e
+#define regCKSVII2C_IC_RX_TL_BASE_IDX 0
+#define regCKSVII2C_IC_TX_TL 0x004f
+#define regCKSVII2C_IC_TX_TL_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_INTR 0x0050
+#define regCKSVII2C_IC_CLR_INTR_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RX_UNDER 0x0051
+#define regCKSVII2C_IC_CLR_RX_UNDER_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RX_OVER 0x0052
+#define regCKSVII2C_IC_CLR_RX_OVER_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_TX_OVER 0x0053
+#define regCKSVII2C_IC_CLR_TX_OVER_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RD_REQ 0x0054
+#define regCKSVII2C_IC_CLR_RD_REQ_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_TX_ABRT 0x0055
+#define regCKSVII2C_IC_CLR_TX_ABRT_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RX_DONE 0x0056
+#define regCKSVII2C_IC_CLR_RX_DONE_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_ACTIVITY 0x0057
+#define regCKSVII2C_IC_CLR_ACTIVITY_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_STOP_DET 0x0058
+#define regCKSVII2C_IC_CLR_STOP_DET_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_START_DET 0x0059
+#define regCKSVII2C_IC_CLR_START_DET_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_GEN_CALL 0x005a
+#define regCKSVII2C_IC_CLR_GEN_CALL_BASE_IDX 0
+#define regCKSVII2C_IC_ENABLE 0x005b
+#define regCKSVII2C_IC_ENABLE_BASE_IDX 0
+#define regCKSVII2C_IC_STATUS 0x005c
+#define regCKSVII2C_IC_STATUS_BASE_IDX 0
+#define regCKSVII2C_IC_TXFLR 0x005d
+#define regCKSVII2C_IC_TXFLR_BASE_IDX 0
+#define regCKSVII2C_IC_RXFLR 0x005e
+#define regCKSVII2C_IC_RXFLR_BASE_IDX 0
+#define regCKSVII2C_IC_SDA_HOLD 0x005f
+#define regCKSVII2C_IC_SDA_HOLD_BASE_IDX 0
+#define regCKSVII2C_IC_TX_ABRT_SOURCE 0x0060
+#define regCKSVII2C_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY 0x0061
+#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define regCKSVII2C_IC_DMA_CR 0x0062
+#define regCKSVII2C_IC_DMA_CR_BASE_IDX 0
+#define regCKSVII2C_IC_DMA_TDLR 0x0063
+#define regCKSVII2C_IC_DMA_TDLR_BASE_IDX 0
+#define regCKSVII2C_IC_DMA_RDLR 0x0064
+#define regCKSVII2C_IC_DMA_RDLR_BASE_IDX 0
+#define regCKSVII2C_IC_SDA_SETUP 0x0065
+#define regCKSVII2C_IC_SDA_SETUP_BASE_IDX 0
+#define regCKSVII2C_IC_ACK_GENERAL_CALL 0x0066
+#define regCKSVII2C_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define regCKSVII2C_IC_ENABLE_STATUS 0x0067
+#define regCKSVII2C_IC_ENABLE_STATUS_BASE_IDX 0
+#define regCKSVII2C_IC_FS_SPKLEN 0x0068
+#define regCKSVII2C_IC_FS_SPKLEN_BASE_IDX 0
+#define regCKSVII2C_IC_HS_SPKLEN 0x0069
+#define regCKSVII2C_IC_HS_SPKLEN_BASE_IDX 0
+#define regCKSVII2C_IC_CLR_RESTART_DET 0x006a
+#define regCKSVII2C_IC_CLR_RESTART_DET_BASE_IDX 0
+#define regCKSVII2C_IC_COMP_PARAM_1 0x006d
+#define regCKSVII2C_IC_COMP_PARAM_1_BASE_IDX 0
+#define regCKSVII2C_IC_COMP_VERSION 0x006e
+#define regCKSVII2C_IC_COMP_VERSION_BASE_IDX 0
+#define regCKSVII2C_IC_COMP_TYPE 0x006f
+#define regCKSVII2C_IC_COMP_TYPE_BASE_IDX 0
+#define regCKSVII2C1_IC_CON 0x0080
+#define regCKSVII2C1_IC_CON_BASE_IDX 0
+#define regCKSVII2C1_IC_TAR 0x0081
+#define regCKSVII2C1_IC_TAR_BASE_IDX 0
+#define regCKSVII2C1_IC_SAR 0x0082
+#define regCKSVII2C1_IC_SAR_BASE_IDX 0
+#define regCKSVII2C1_IC_HS_MADDR 0x0083
+#define regCKSVII2C1_IC_HS_MADDR_BASE_IDX 0
+#define regCKSVII2C1_IC_DATA_CMD 0x0084
+#define regCKSVII2C1_IC_DATA_CMD_BASE_IDX 0
+#define regCKSVII2C1_IC_SS_SCL_HCNT 0x0085
+#define regCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_SS_SCL_LCNT 0x0086
+#define regCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_FS_SCL_HCNT 0x0087
+#define regCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_FS_SCL_LCNT 0x0088
+#define regCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_HS_SCL_HCNT 0x0089
+#define regCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_HS_SCL_LCNT 0x008a
+#define regCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0
+#define regCKSVII2C1_IC_INTR_STAT 0x008b
+#define regCKSVII2C1_IC_INTR_STAT_BASE_IDX 0
+#define regCKSVII2C1_IC_INTR_MASK 0x008c
+#define regCKSVII2C1_IC_INTR_MASK_BASE_IDX 0
+#define regCKSVII2C1_IC_RAW_INTR_STAT 0x008d
+#define regCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0
+#define regCKSVII2C1_IC_RX_TL 0x008e
+#define regCKSVII2C1_IC_RX_TL_BASE_IDX 0
+#define regCKSVII2C1_IC_TX_TL 0x008f
+#define regCKSVII2C1_IC_TX_TL_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_INTR 0x0090
+#define regCKSVII2C1_IC_CLR_INTR_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RX_UNDER 0x0091
+#define regCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RX_OVER 0x0092
+#define regCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_TX_OVER 0x0093
+#define regCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RD_REQ 0x0094
+#define regCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_TX_ABRT 0x0095
+#define regCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RX_DONE 0x0096
+#define regCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_ACTIVITY 0x0097
+#define regCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_STOP_DET 0x0098
+#define regCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_START_DET 0x0099
+#define regCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_GEN_CALL 0x009a
+#define regCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0
+#define regCKSVII2C1_IC_ENABLE 0x009b
+#define regCKSVII2C1_IC_ENABLE_BASE_IDX 0
+#define regCKSVII2C1_IC_STATUS 0x009c
+#define regCKSVII2C1_IC_STATUS_BASE_IDX 0
+#define regCKSVII2C1_IC_TXFLR 0x009d
+#define regCKSVII2C1_IC_TXFLR_BASE_IDX 0
+#define regCKSVII2C1_IC_RXFLR 0x009e
+#define regCKSVII2C1_IC_RXFLR_BASE_IDX 0
+#define regCKSVII2C1_IC_SDA_HOLD 0x009f
+#define regCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0
+#define regCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0
+#define regCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1
+#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define regCKSVII2C1_IC_DMA_CR 0x00a2
+#define regCKSVII2C1_IC_DMA_CR_BASE_IDX 0
+#define regCKSVII2C1_IC_DMA_TDLR 0x00a3
+#define regCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0
+#define regCKSVII2C1_IC_DMA_RDLR 0x00a4
+#define regCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0
+#define regCKSVII2C1_IC_SDA_SETUP 0x00a5
+#define regCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0
+#define regCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6
+#define regCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define regCKSVII2C1_IC_ENABLE_STATUS 0x00a7
+#define regCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0
+#define regCKSVII2C1_IC_FS_SPKLEN 0x00a8
+#define regCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0
+#define regCKSVII2C1_IC_HS_SPKLEN 0x00a9
+#define regCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0
+#define regCKSVII2C1_IC_CLR_RESTART_DET 0x00aa
+#define regCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0
+#define regCKSVII2C1_IC_COMP_PARAM_1 0x00ad
+#define regCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0
+#define regCKSVII2C1_IC_COMP_VERSION 0x00ae
+#define regCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0
+#define regCKSVII2C1_IC_COMP_TYPE 0x00af
+#define regCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0
+#define regSMUIO_PWRMGT 0x018c
+#define regSMUIO_PWRMGT_BASE_IDX 0
+
+
+// addressBlock: smuio_smuio_rom_SmuSmuioDec
+// base address: 0x5a380
+#define regROM_CNTL 0x00e0
+#define regROM_CNTL_BASE_IDX 0
+#define regPAGE_MIRROR_CNTL 0x00e1
+#define regPAGE_MIRROR_CNTL_BASE_IDX 0
+#define regROM_STATUS 0x00e2
+#define regROM_STATUS_BASE_IDX 0
+#define regCGTT_ROM_CLK_CTRL0 0x00e3
+#define regCGTT_ROM_CLK_CTRL0_BASE_IDX 0
+#define regROM_INDEX 0x00e4
+#define regROM_INDEX_BASE_IDX 0
+#define regROM_DATA 0x00e5
+#define regROM_DATA_BASE_IDX 0
+#define regROM_START 0x00e6
+#define regROM_START_BASE_IDX 0
+#define regROM_SW_CNTL 0x00e8
+#define regROM_SW_CNTL_BASE_IDX 0
+#define regROM_SW_STATUS 0x00e9
+#define regROM_SW_STATUS_BASE_IDX 0
+#define regROM_SW_COMMAND 0x00ea
+#define regROM_SW_COMMAND_BASE_IDX 0
+#define regROM_SW_DATA_1 0x00ec
+#define regROM_SW_DATA_1_BASE_IDX 0
+#define regROM_SW_DATA_2 0x00ed
+#define regROM_SW_DATA_2_BASE_IDX 0
+#define regROM_SW_DATA_3 0x00ee
+#define regROM_SW_DATA_3_BASE_IDX 0
+#define regROM_SW_DATA_4 0x00ef
+#define regROM_SW_DATA_4_BASE_IDX 0
+#define regROM_SW_DATA_5 0x00f0
+#define regROM_SW_DATA_5_BASE_IDX 0
+#define regROM_SW_DATA_6 0x00f1
+#define regROM_SW_DATA_6_BASE_IDX 0
+#define regROM_SW_DATA_7 0x00f2
+#define regROM_SW_DATA_7_BASE_IDX 0
+#define regROM_SW_DATA_8 0x00f3
+#define regROM_SW_DATA_8_BASE_IDX 0
+#define regROM_SW_DATA_9 0x00f4
+#define regROM_SW_DATA_9_BASE_IDX 0
+#define regROM_SW_DATA_10 0x00f5
+#define regROM_SW_DATA_10_BASE_IDX 0
+#define regROM_SW_DATA_11 0x00f6
+#define regROM_SW_DATA_11_BASE_IDX 0
+#define regROM_SW_DATA_12 0x00f7
+#define regROM_SW_DATA_12_BASE_IDX 0
+#define regROM_SW_DATA_13 0x00f8
+#define regROM_SW_DATA_13_BASE_IDX 0
+#define regROM_SW_DATA_14 0x00f9
+#define regROM_SW_DATA_14_BASE_IDX 0
+#define regROM_SW_DATA_15 0x00fa
+#define regROM_SW_DATA_15_BASE_IDX 0
+#define regROM_SW_DATA_16 0x00fb
+#define regROM_SW_DATA_16_BASE_IDX 0
+#define regROM_SW_DATA_17 0x00fc
+#define regROM_SW_DATA_17_BASE_IDX 0
+#define regROM_SW_DATA_18 0x00fd
+#define regROM_SW_DATA_18_BASE_IDX 0
+#define regROM_SW_DATA_19 0x00fe
+#define regROM_SW_DATA_19_BASE_IDX 0
+#define regROM_SW_DATA_20 0x00ff
+#define regROM_SW_DATA_20_BASE_IDX 0
+#define regROM_SW_DATA_21 0x0100
+#define regROM_SW_DATA_21_BASE_IDX 0
+#define regROM_SW_DATA_22 0x0101
+#define regROM_SW_DATA_22_BASE_IDX 0
+#define regROM_SW_DATA_23 0x0102
+#define regROM_SW_DATA_23_BASE_IDX 0
+#define regROM_SW_DATA_24 0x0103
+#define regROM_SW_DATA_24_BASE_IDX 0
+#define regROM_SW_DATA_25 0x0104
+#define regROM_SW_DATA_25_BASE_IDX 0
+#define regROM_SW_DATA_26 0x0105
+#define regROM_SW_DATA_26_BASE_IDX 0
+#define regROM_SW_DATA_27 0x0106
+#define regROM_SW_DATA_27_BASE_IDX 0
+#define regROM_SW_DATA_28 0x0107
+#define regROM_SW_DATA_28_BASE_IDX 0
+#define regROM_SW_DATA_29 0x0108
+#define regROM_SW_DATA_29_BASE_IDX 0
+#define regROM_SW_DATA_30 0x0109
+#define regROM_SW_DATA_30_BASE_IDX 0
+#define regROM_SW_DATA_31 0x010a
+#define regROM_SW_DATA_31_BASE_IDX 0
+#define regROM_SW_DATA_32 0x010b
+#define regROM_SW_DATA_32_BASE_IDX 0
+#define regROM_SW_DATA_33 0x010c
+#define regROM_SW_DATA_33_BASE_IDX 0
+#define regROM_SW_DATA_34 0x010d
+#define regROM_SW_DATA_34_BASE_IDX 0
+#define regROM_SW_DATA_35 0x010e
+#define regROM_SW_DATA_35_BASE_IDX 0
+#define regROM_SW_DATA_36 0x010f
+#define regROM_SW_DATA_36_BASE_IDX 0
+#define regROM_SW_DATA_37 0x0110
+#define regROM_SW_DATA_37_BASE_IDX 0
+#define regROM_SW_DATA_38 0x0111
+#define regROM_SW_DATA_38_BASE_IDX 0
+#define regROM_SW_DATA_39 0x0112
+#define regROM_SW_DATA_39_BASE_IDX 0
+#define regROM_SW_DATA_40 0x0113
+#define regROM_SW_DATA_40_BASE_IDX 0
+#define regROM_SW_DATA_41 0x0114
+#define regROM_SW_DATA_41_BASE_IDX 0
+#define regROM_SW_DATA_42 0x0115
+#define regROM_SW_DATA_42_BASE_IDX 0
+#define regROM_SW_DATA_43 0x0116
+#define regROM_SW_DATA_43_BASE_IDX 0
+#define regROM_SW_DATA_44 0x0117
+#define regROM_SW_DATA_44_BASE_IDX 0
+#define regROM_SW_DATA_45 0x0118
+#define regROM_SW_DATA_45_BASE_IDX 0
+#define regROM_SW_DATA_46 0x0119
+#define regROM_SW_DATA_46_BASE_IDX 0
+#define regROM_SW_DATA_47 0x011a
+#define regROM_SW_DATA_47_BASE_IDX 0
+#define regROM_SW_DATA_48 0x011b
+#define regROM_SW_DATA_48_BASE_IDX 0
+#define regROM_SW_DATA_49 0x011c
+#define regROM_SW_DATA_49_BASE_IDX 0
+#define regROM_SW_DATA_50 0x011d
+#define regROM_SW_DATA_50_BASE_IDX 0
+#define regROM_SW_DATA_51 0x011e
+#define regROM_SW_DATA_51_BASE_IDX 0
+#define regROM_SW_DATA_52 0x011f
+#define regROM_SW_DATA_52_BASE_IDX 0
+#define regROM_SW_DATA_53 0x0120
+#define regROM_SW_DATA_53_BASE_IDX 0
+#define regROM_SW_DATA_54 0x0121
+#define regROM_SW_DATA_54_BASE_IDX 0
+#define regROM_SW_DATA_55 0x0122
+#define regROM_SW_DATA_55_BASE_IDX 0
+#define regROM_SW_DATA_56 0x0123
+#define regROM_SW_DATA_56_BASE_IDX 0
+#define regROM_SW_DATA_57 0x0124
+#define regROM_SW_DATA_57_BASE_IDX 0
+#define regROM_SW_DATA_58 0x0125
+#define regROM_SW_DATA_58_BASE_IDX 0
+#define regROM_SW_DATA_59 0x0126
+#define regROM_SW_DATA_59_BASE_IDX 0
+#define regROM_SW_DATA_60 0x0127
+#define regROM_SW_DATA_60_BASE_IDX 0
+#define regROM_SW_DATA_61 0x0128
+#define regROM_SW_DATA_61_BASE_IDX 0
+#define regROM_SW_DATA_62 0x0129
+#define regROM_SW_DATA_62_BASE_IDX 0
+#define regROM_SW_DATA_63 0x012a
+#define regROM_SW_DATA_63_BASE_IDX 0
+#define regROM_SW_DATA_64 0x012b
+#define regROM_SW_DATA_64_BASE_IDX 0
+
+
+// addressBlock: smuio_smuio_gpio_SmuSmuioDec
+// base address: 0x5a500
+#define regSMU_GPIOPAD_SW_INT_STAT 0x0140
+#define regSMU_GPIOPAD_SW_INT_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_MASK 0x0141
+#define regSMU_GPIOPAD_MASK_BASE_IDX 0
+#define regSMU_GPIOPAD_A 0x0142
+#define regSMU_GPIOPAD_A_BASE_IDX 0
+#define regSMU_GPIOPAD_TXIMPSEL 0x0143
+#define regSMU_GPIOPAD_TXIMPSEL_BASE_IDX 0
+#define regSMU_GPIOPAD_EN 0x0144
+#define regSMU_GPIOPAD_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_Y 0x0145
+#define regSMU_GPIOPAD_Y_BASE_IDX 0
+#define regSMU_GPIOPAD_RXEN 0x0146
+#define regSMU_GPIOPAD_RXEN_BASE_IDX 0
+#define regSMU_GPIOPAD_RCVR_SEL0 0x0147
+#define regSMU_GPIOPAD_RCVR_SEL0_BASE_IDX 0
+#define regSMU_GPIOPAD_RCVR_SEL1 0x0148
+#define regSMU_GPIOPAD_RCVR_SEL1_BASE_IDX 0
+#define regSMU_GPIOPAD_PU_EN 0x0149
+#define regSMU_GPIOPAD_PU_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_PD_EN 0x014a
+#define regSMU_GPIOPAD_PD_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_PINSTRAPS 0x014b
+#define regSMU_GPIOPAD_PINSTRAPS_BASE_IDX 0
+#define regDFT_PINSTRAPS 0x014c
+#define regDFT_PINSTRAPS_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_STAT_EN 0x014d
+#define regSMU_GPIOPAD_INT_STAT_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_STAT 0x014e
+#define regSMU_GPIOPAD_INT_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_STAT_AK 0x014f
+#define regSMU_GPIOPAD_INT_STAT_AK_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_EN 0x0150
+#define regSMU_GPIOPAD_INT_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_TYPE 0x0151
+#define regSMU_GPIOPAD_INT_TYPE_BASE_IDX 0
+#define regSMU_GPIOPAD_INT_POLARITY 0x0152
+#define regSMU_GPIOPAD_INT_POLARITY_BASE_IDX 0
+#define regSMUIO_PCC_GPIO_SELECT 0x0155
+#define regSMUIO_PCC_GPIO_SELECT_BASE_IDX 0
+#define regSMU_GPIOPAD_S0 0x0156
+#define regSMU_GPIOPAD_S0_BASE_IDX 0
+#define regSMU_GPIOPAD_S1 0x0157
+#define regSMU_GPIOPAD_S1_BASE_IDX 0
+#define regSMU_GPIOPAD_SCHMEN 0x0158
+#define regSMU_GPIOPAD_SCHMEN_BASE_IDX 0
+#define regSMU_GPIOPAD_SCL_EN 0x0159
+#define regSMU_GPIOPAD_SCL_EN_BASE_IDX 0
+#define regSMU_GPIOPAD_SDA_EN 0x015a
+#define regSMU_GPIOPAD_SDA_EN_BASE_IDX 0
+#define regSMUIO_GPIO_INT0_SELECT 0x015b
+#define regSMUIO_GPIO_INT0_SELECT_BASE_IDX 0
+#define regSMUIO_GPIO_INT1_SELECT 0x015c
+#define regSMUIO_GPIO_INT1_SELECT_BASE_IDX 0
+#define regSMUIO_GPIO_INT2_SELECT 0x015d
+#define regSMUIO_GPIO_INT2_SELECT_BASE_IDX 0
+#define regSMUIO_GPIO_INT3_SELECT 0x015e
+#define regSMUIO_GPIO_INT3_SELECT_BASE_IDX 0
+#define regSMU_GPIOPAD_MP_INT0_STAT 0x015f
+#define regSMU_GPIOPAD_MP_INT0_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_MP_INT1_STAT 0x0160
+#define regSMU_GPIOPAD_MP_INT1_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_MP_INT2_STAT 0x0161
+#define regSMU_GPIOPAD_MP_INT2_STAT_BASE_IDX 0
+#define regSMU_GPIOPAD_MP_INT3_STAT 0x0162
+#define regSMU_GPIOPAD_MP_INT3_STAT_BASE_IDX 0
+#define regSMIO_INDEX 0x0163
+#define regSMIO_INDEX_BASE_IDX 0
+#define regS0_VID_SMIO_CNTL 0x0164
+#define regS0_VID_SMIO_CNTL_BASE_IDX 0
+#define regS1_VID_SMIO_CNTL 0x0165
+#define regS1_VID_SMIO_CNTL_BASE_IDX 0
+#define regOPEN_DRAIN_SELECT 0x0166
+#define regOPEN_DRAIN_SELECT_BASE_IDX 0
+#define regSMIO_ENABLE 0x0167
+#define regSMIO_ENABLE_BASE_IDX 0
+
+#endif
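
Several SMUIO registers defined above split one value across two 32-bit registers, e.g. GOLDEN_TSC_COUNT_UPPER/LOWER (the UPPER field is 24 bits wide per the companion mask header that follows). A sketch of the usual consistent-read loop for such a split counter, with a canned stub standing in for amdgpu's RREG32():

#include <stdint.h>
#include <stdio.h>

#define regGOLDEN_TSC_COUNT_UPPER 0x002d
#define regGOLDEN_TSC_COUNT_LOWER 0x002e
#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL

/* Stub MMIO read returning canned values; a real driver reads through
 * its mapped register BAR. */
static uint32_t rreg(uint32_t dword_offset)
{
	switch (dword_offset) {
	case regGOLDEN_TSC_COUNT_UPPER:
		return 0x00adbeef;
	case regGOLDEN_TSC_COUNT_LOWER:
		return 0x12345678;
	default:
		return 0;
	}
}

/* Consistent read of a counter split across UPPER/LOWER registers:
 * re-read if the upper half changed between the two accesses. */
static uint64_t golden_tsc_read(void)
{
	uint32_t hi, lo, hi2;

	do {
		hi = rreg(regGOLDEN_TSC_COUNT_UPPER) &
		     GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK;
		lo = rreg(regGOLDEN_TSC_COUNT_LOWER);
		hi2 = rreg(regGOLDEN_TSC_COUNT_UPPER) &
		      GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK;
	} while (hi != hi2);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("golden tsc = 0x%016llx\n",
	       (unsigned long long)golden_tsc_read());
	return 0;
}
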
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h
new file mode 100644
index 000000000000..6204505e553b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h
@@ -0,0 +1,1106 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _smuio_14_0_2_SH_MASK_HEADER
+#define _smuio_14_0_2_SH_MASK_HEADER
+
+
+// addressBlock: smuio_smuio_tsc_SmuSmuioDec
+//PWROK_REFCLK_GAP_CYCLES
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L
+//GOLDEN_TSC_INCREMENT_UPPER
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_INCREMENT_LOWER
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL
+//GOLDEN_TSC_COUNT_UPPER
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_COUNT_LOWER
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_UPPER
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_LOWER
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL
+//SOC_GAP_PWROK
+#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0
+#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L
+
+
+// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
+//PWR_VIRT_RESET_REQ
+#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f
+#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL
+#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L
+//PWR_DISP_TIMER_CONTROL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER_DEBUG
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x00000004L
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L
+//PWR_DISP_TIMER2_CONTROL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER2_DEBUG
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x00000004L
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L
+//PWR_DISP_TIMER_GLOBAL_CONTROL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L
+//PWR_IH_CONTROL
+#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f
+#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L
+
+
+// addressBlock: smuio_smuio_misc_SmuSmuioDec
+//SMUIO_MCM_CONFIG
+#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0
+#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2
+#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x8
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0xc
+#define SMUIO_MCM_CONFIG__DIE_CONFIG__SHIFT 0xd
+#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10
+#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11
+#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L
+#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL
+#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000300L
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x00001000L
+#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L
+#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L
+//IP_DISCOVERY_VERSION
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER0
+#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0
+#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER1
+#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0
+#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER2
+#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0
+#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER3
+#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0
+#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER4
+#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0
+#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER5
+#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0
+#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER6
+#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0
+#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER7
+#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0
+#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: smuio_smuio_i2c_SmuSmuioDec
+//CKSVII2C_IC_CON
+#define CKSVII2C_IC_CON__IC_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C_IC_CON__IC_RESTART_EN__SHIFT 0x5
+#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C_IC_CON__TX_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C_IC_CON__BUS_CLEAR_FEATURE_CTRL__SHIFT 0xb
+#define CKSVII2C_IC_CON__IC_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C_IC_CON__IC_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C_IC_CON__TX_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C_IC_TAR
+#define CKSVII2C_IC_TAR__IC_TAR__SHIFT 0x0
+#define CKSVII2C_IC_TAR__GC_OR_START__SHIFT 0xa
+#define CKSVII2C_IC_TAR__SPECIAL__SHIFT 0xb
+#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C_IC_TAR__IC_TAR_MASK 0x000003FFL
+#define CKSVII2C_IC_TAR__GC_OR_START_MASK 0x00000400L
+#define CKSVII2C_IC_TAR__SPECIAL_MASK 0x00000800L
+#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C_IC_SAR
+#define CKSVII2C_IC_SAR__IC_SAR__SHIFT 0x0
+#define CKSVII2C_IC_SAR__IC_SAR_MASK 0x000003FFL
+//CKSVII2C_IC_HS_MADDR
+#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR__SHIFT 0x0
+#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR_MASK 0x00000007L
+//CKSVII2C_IC_DATA_CMD
+#define CKSVII2C_IC_DATA_CMD__DAT__SHIFT 0x0
+#define CKSVII2C_IC_DATA_CMD__CMD__SHIFT 0x8
+#define CKSVII2C_IC_DATA_CMD__STOP__SHIFT 0x9
+#define CKSVII2C_IC_DATA_CMD__RESTART__SHIFT 0xa
+#define CKSVII2C_IC_DATA_CMD__FIRST_DATA_BYTE__SHIFT 0xb
+#define CKSVII2C_IC_DATA_CMD__DAT_MASK 0x000000FFL
+#define CKSVII2C_IC_DATA_CMD__CMD_MASK 0x00000100L
+#define CKSVII2C_IC_DATA_CMD__STOP_MASK 0x00000200L
+#define CKSVII2C_IC_DATA_CMD__RESTART_MASK 0x00000400L
+//CKSVII2C_IC_SS_SCL_HCNT
+#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_SS_SCL_LCNT
+#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_FS_SCL_HCNT
+#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_FS_SCL_LCNT
+#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_HS_SCL_HCNT
+#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_HS_SCL_LCNT
+#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C_IC_INTR_STAT
+#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER__SHIFT 0x0
+#define CKSVII2C_IC_INTR_STAT__R_RX_OVER__SHIFT 0x1
+#define CKSVII2C_IC_INTR_STAT__R_RX_FULL__SHIFT 0x2
+#define CKSVII2C_IC_INTR_STAT__R_TX_OVER__SHIFT 0x3
+#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C_IC_INTR_STAT__R_RD_REQ__SHIFT 0x5
+#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT__SHIFT 0x6
+#define CKSVII2C_IC_INTR_STAT__R_RX_DONE__SHIFT 0x7
+#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY__SHIFT 0x8
+#define CKSVII2C_IC_INTR_STAT__R_STOP_DET__SHIFT 0x9
+#define CKSVII2C_IC_INTR_STAT__R_START_DET__SHIFT 0xa
+#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL__SHIFT 0xb
+#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET__SHIFT 0xc
+#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C_IC_INTR_STAT__R_SCL_STUCK_AT_LOW__SHIFT 0xe
+#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C_IC_INTR_STAT__R_RX_OVER_MASK 0x00000002L
+#define CKSVII2C_IC_INTR_STAT__R_RX_FULL_MASK 0x00000004L
+#define CKSVII2C_IC_INTR_STAT__R_TX_OVER_MASK 0x00000008L
+#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C_IC_INTR_STAT__R_RD_REQ_MASK 0x00000020L
+#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C_IC_INTR_STAT__R_RX_DONE_MASK 0x00000080L
+#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C_IC_INTR_STAT__R_STOP_DET_MASK 0x00000200L
+#define CKSVII2C_IC_INTR_STAT__R_START_DET_MASK 0x00000400L
+#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C_IC_INTR_MASK
+#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER__SHIFT 0x0
+#define CKSVII2C_IC_INTR_MASK__M_RX_OVER__SHIFT 0x1
+#define CKSVII2C_IC_INTR_MASK__M_RX_FULL__SHIFT 0x2
+#define CKSVII2C_IC_INTR_MASK__M_TX_OVER__SHIFT 0x3
+#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C_IC_INTR_MASK__M_RD_REQ__SHIFT 0x5
+#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT__SHIFT 0x6
+#define CKSVII2C_IC_INTR_MASK__M_RX_DONE__SHIFT 0x7
+#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY__SHIFT 0x8
+#define CKSVII2C_IC_INTR_MASK__M_STOP_DET__SHIFT 0x9
+#define CKSVII2C_IC_INTR_MASK__M_START_DET__SHIFT 0xa
+#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL__SHIFT 0xb
+#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET__SHIFT 0xc
+#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C_IC_INTR_MASK__M_SCL_STUCK_AT_LOW__SHIFT 0xe
+#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C_IC_INTR_MASK__M_RX_OVER_MASK 0x00000002L
+#define CKSVII2C_IC_INTR_MASK__M_RX_FULL_MASK 0x00000004L
+#define CKSVII2C_IC_INTR_MASK__M_TX_OVER_MASK 0x00000008L
+#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C_IC_INTR_MASK__M_RD_REQ_MASK 0x00000020L
+#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C_IC_INTR_MASK__M_RX_DONE_MASK 0x00000080L
+#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C_IC_INTR_MASK__M_STOP_DET_MASK 0x00000200L
+#define CKSVII2C_IC_INTR_MASK__M_START_DET_MASK 0x00000400L
+#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C_IC_RAW_INTR_STAT
+//CKSVII2C_IC_RX_TL
+#define CKSVII2C_IC_RX_TL__RX_TL__SHIFT 0x0
+//CKSVII2C_IC_TX_TL
+#define CKSVII2C_IC_TX_TL__TX_TL__SHIFT 0x0
+//CKSVII2C_IC_CLR_INTR
+//CKSVII2C_IC_CLR_RX_UNDER
+//CKSVII2C_IC_CLR_RX_OVER
+//CKSVII2C_IC_CLR_TX_OVER
+//CKSVII2C_IC_CLR_RD_REQ
+//CKSVII2C_IC_CLR_TX_ABRT
+//CKSVII2C_IC_CLR_RX_DONE
+//CKSVII2C_IC_CLR_ACTIVITY
+//CKSVII2C_IC_CLR_STOP_DET
+//CKSVII2C_IC_CLR_START_DET
+//CKSVII2C_IC_CLR_GEN_CALL
+//CKSVII2C_IC_ENABLE
+#define CKSVII2C_IC_ENABLE__ENABLE__SHIFT 0x0
+#define CKSVII2C_IC_ENABLE__ABORT__SHIFT 0x1
+#define CKSVII2C_IC_ENABLE__TX_CMD_BLOCK__SHIFT 0x2
+#define CKSVII2C_IC_ENABLE__SDA_STUCK_RECOVERY_ENABLE__SHIFT 0x3
+#define CKSVII2C_IC_ENABLE__ENABLE_MASK 0x00000001L
+#define CKSVII2C_IC_ENABLE__ABORT_MASK 0x00000002L
+//CKSVII2C_IC_STATUS
+#define CKSVII2C_IC_STATUS__ACTIVITY__SHIFT 0x0
+#define CKSVII2C_IC_STATUS__TFNF__SHIFT 0x1
+#define CKSVII2C_IC_STATUS__TFE__SHIFT 0x2
+#define CKSVII2C_IC_STATUS__RFNE__SHIFT 0x3
+#define CKSVII2C_IC_STATUS__RFF__SHIFT 0x4
+#define CKSVII2C_IC_STATUS__MST_ACTIVITY__SHIFT 0x5
+#define CKSVII2C_IC_STATUS__SLV_ACTIVITY__SHIFT 0x6
+#define CKSVII2C_IC_STATUS__MST_HOLD_TX_FIFO_EMPTY__SHIFT 0x7
+#define CKSVII2C_IC_STATUS__MST_HOLD_RX_FIFO_FULL__SHIFT 0x8
+#define CKSVII2C_IC_STATUS__SLV_HOLD_TX_FIFO_EMPTY__SHIFT 0x9
+#define CKSVII2C_IC_STATUS__SLV_HOLD_RX_FIFO_FULL__SHIFT 0xa
+#define CKSVII2C_IC_STATUS__SDA_STUCK_NOT_RECOVERED__SHIFT 0xb
+#define CKSVII2C_IC_STATUS__ACTIVITY_MASK 0x00000001L
+#define CKSVII2C_IC_STATUS__TFNF_MASK 0x00000002L
+#define CKSVII2C_IC_STATUS__TFE_MASK 0x00000004L
+#define CKSVII2C_IC_STATUS__RFNE_MASK 0x00000008L
+#define CKSVII2C_IC_STATUS__RFF_MASK 0x00000010L
+#define CKSVII2C_IC_STATUS__MST_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C_IC_STATUS__SLV_ACTIVITY_MASK 0x00000040L
+//CKSVII2C_IC_TXFLR
+#define CKSVII2C_IC_TXFLR__TXFLR__SHIFT 0x0
+//CKSVII2C_IC_RXFLR
+#define CKSVII2C_IC_RXFLR__RXFLR__SHIFT 0x0
+//CKSVII2C_IC_SDA_HOLD
+#define CKSVII2C_IC_SDA_HOLD__IC_SDA_TX_HOLD__SHIFT 0x0
+#define CKSVII2C_IC_SDA_HOLD__IC_SDA_RX_HOLD__SHIFT 0x10
+//CKSVII2C_IC_TX_ABRT_SOURCE
+//CKSVII2C_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C_IC_DMA_CR
+//CKSVII2C_IC_DMA_TDLR
+//CKSVII2C_IC_DMA_RDLR
+//CKSVII2C_IC_SDA_SETUP
+#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP__SHIFT 0x0
+#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP_MASK 0x000000FFL
+//CKSVII2C_IC_ACK_GENERAL_CALL
+#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C_IC_ENABLE_STATUS
+#define CKSVII2C_IC_ENABLE_STATUS__IC_EN__SHIFT 0x0
+#define CKSVII2C_IC_ENABLE_STATUS__SLV_DISABLED_WHILE_BUSY__SHIFT 0x1
+#define CKSVII2C_IC_ENABLE_STATUS__SLV_RX_DATA_LOST__SHIFT 0x2
+#define CKSVII2C_IC_ENABLE_STATUS__IC_EN_MASK 0x00000001L
+//CKSVII2C_IC_FS_SPKLEN
+#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN__SHIFT 0x0
+#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN_MASK 0x000000FFL
+//CKSVII2C_IC_HS_SPKLEN
+#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN__SHIFT 0x0
+#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN_MASK 0x000000FFL
+//CKSVII2C_IC_CLR_RESTART_DET
+//CKSVII2C_IC_COMP_PARAM_1
+#define CKSVII2C_IC_COMP_PARAM_1__APB_DATA_WIDTH__SHIFT 0x0
+#define CKSVII2C_IC_COMP_PARAM_1__MAX_SPEED_MODE__SHIFT 0x2
+#define CKSVII2C_IC_COMP_PARAM_1__HC_COUNT_VALUES__SHIFT 0x4
+#define CKSVII2C_IC_COMP_PARAM_1__INTR_IO__SHIFT 0x5
+#define CKSVII2C_IC_COMP_PARAM_1__HAS_DMA__SHIFT 0x6
+#define CKSVII2C_IC_COMP_PARAM_1__ADD_ENCODED_PARAMS__SHIFT 0x7
+#define CKSVII2C_IC_COMP_PARAM_1__RX_BUFFER_DEPTH__SHIFT 0x8
+#define CKSVII2C_IC_COMP_PARAM_1__TX_BUFFER_DEPTH__SHIFT 0x10
+//CKSVII2C_IC_COMP_VERSION
+#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION__SHIFT 0x0
+#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION_MASK 0xFFFFFFFFL
+//CKSVII2C_IC_COMP_TYPE
+#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0
+#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL
+//CKSVII2C1_IC_CON
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C1_IC_CON__BUS_CLEAR_FEATURE_CTRL1__SHIFT 0xb
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C1_IC_TAR
+#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0
+#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa
+#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL
+#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L
+#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C1_IC_SAR
+#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0
+#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL
+//CKSVII2C1_IC_HS_MADDR
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L
+//CKSVII2C1_IC_DATA_CMD
+#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0
+#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8
+#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9
+#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa
+#define CKSVII2C1_IC_DATA_CMD__FIRST1_DATA_BYTE__SHIFT 0xb
+#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL
+#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L
+#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L
+#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L
+//CKSVII2C1_IC_SS_SCL_HCNT
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_SS_SCL_LCNT
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_HCNT
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_LCNT
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_HCNT
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_LCNT
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_INTR_STAT
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_STAT__R1_SCL_STUCK_AT_LOW__SHIFT 0xe
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_INTR_MASK
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_MASK__M1_SCL_STUCK_AT_LOW__SHIFT 0xe
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_RAW_INTR_STAT
+//CKSVII2C1_IC_RX_TL
+#define CKSVII2C1_IC_RX_TL__RX1_TL__SHIFT 0x0
+//CKSVII2C1_IC_TX_TL
+#define CKSVII2C1_IC_TX_TL__TX1_TL__SHIFT 0x0
+//CKSVII2C1_IC_CLR_INTR
+//CKSVII2C1_IC_CLR_RX_UNDER
+//CKSVII2C1_IC_CLR_RX_OVER
+//CKSVII2C1_IC_CLR_TX_OVER
+//CKSVII2C1_IC_CLR_RD_REQ
+//CKSVII2C1_IC_CLR_TX_ABRT
+//CKSVII2C1_IC_CLR_RX_DONE
+//CKSVII2C1_IC_CLR_ACTIVITY
+//CKSVII2C1_IC_CLR_STOP_DET
+//CKSVII2C1_IC_CLR_START_DET
+//CKSVII2C1_IC_CLR_GEN_CALL
+//CKSVII2C1_IC_ENABLE
+#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE__TX1_CMD_BLOCK__SHIFT 0x2
+#define CKSVII2C1_IC_ENABLE__SDA1_STUCK_RECOVERY_ENABLE__SHIFT 0x3
+#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L
+//CKSVII2C1_IC_STATUS
+#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0
+#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1
+#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2
+#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3
+#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6
+#define CKSVII2C1_IC_STATUS__MST1_HOLD_TX_FIFO_EMPTY__SHIFT 0x7
+#define CKSVII2C1_IC_STATUS__MST1_HOLD_RX_FIFO_FULL__SHIFT 0x8
+#define CKSVII2C1_IC_STATUS__SLV1_HOLD_TX_FIFO_EMPTY__SHIFT 0x9
+#define CKSVII2C1_IC_STATUS__SLV1_HOLD_RX_FIFO_FULL__SHIFT 0xa
+#define CKSVII2C1_IC_STATUS__SDA1_STUCK_NOT_RECOVERED__SHIFT 0xb
+#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L
+#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L
+#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L
+#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L
+#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L
+//CKSVII2C1_IC_TXFLR
+#define CKSVII2C1_IC_TXFLR__TXFLR1__SHIFT 0x0
+//CKSVII2C1_IC_RXFLR
+#define CKSVII2C1_IC_RXFLR__RXFLR1__SHIFT 0x0
+//CKSVII2C1_IC_SDA_HOLD
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_TX_HOLD__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_RX_HOLD__SHIFT 0x10
+//CKSVII2C1_IC_TX_ABRT_SOURCE
+//CKSVII2C1_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C1_IC_DMA_CR
+//CKSVII2C1_IC_DMA_TDLR
+//CKSVII2C1_IC_DMA_RDLR
+//CKSVII2C1_IC_SDA_SETUP
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL
+//CKSVII2C1_IC_ACK_GENERAL_CALL
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C1_IC_ENABLE_STATUS
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_DISABLED_WHILE_BUSY__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_DATA_LOST__SHIFT 0x2
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L
+//CKSVII2C1_IC_FS_SPKLEN
+#define CKSVII2C1_IC_FS_SPKLEN__FS1_SPKLEN__SHIFT 0x0
+//CKSVII2C1_IC_HS_SPKLEN
+#define CKSVII2C1_IC_HS_SPKLEN__HS1_SPKLEN__SHIFT 0x0
+//CKSVII2C1_IC_CLR_RESTART_DET
+//CKSVII2C1_IC_COMP_PARAM_1
+#define CKSVII2C1_IC_COMP_PARAM_1__APB1_DATA_WIDTH__SHIFT 0x0
+#define CKSVII2C1_IC_COMP_PARAM_1__MAX1_SPEED_MODE__SHIFT 0x2
+#define CKSVII2C1_IC_COMP_PARAM_1__HC1_COUNT_VALUES__SHIFT 0x4
+#define CKSVII2C1_IC_COMP_PARAM_1__INTR1_IO__SHIFT 0x5
+#define CKSVII2C1_IC_COMP_PARAM_1__HAS1_DMA__SHIFT 0x6
+#define CKSVII2C1_IC_COMP_PARAM_1__ADD1_ENCODED_PARAMS__SHIFT 0x7
+#define CKSVII2C1_IC_COMP_PARAM_1__RX1_BUFFER_DEPTH__SHIFT 0x8
+#define CKSVII2C1_IC_COMP_PARAM_1__TX1_BUFFER_DEPTH__SHIFT 0x10
+//CKSVII2C1_IC_COMP_VERSION
+#define CKSVII2C1_IC_COMP_VERSION__COMP1_VERSION__SHIFT 0x0
+//CKSVII2C1_IC_COMP_TYPE
+#define CKSVII2C1_IC_COMP_TYPE__COMP1_TYPE__SHIFT 0x0
+//SMUIO_PWRMGT
+#define SMUIO_PWRMGT__i2c_clk_gate_en__SHIFT 0x0
+#define SMUIO_PWRMGT__i2c1_clk_gate_en__SHIFT 0x4
+#define SMUIO_PWRMGT__i2c_clk_gate_en_MASK 0x00000001L
+#define SMUIO_PWRMGT__i2c1_clk_gate_en_MASK 0x00000010L
+
+
+// addressBlock: smuio_smuio_rom_SmuSmuioDec
+//ROM_CNTL
+#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x0
+#define ROM_CNTL__READ_MODE__SHIFT 0x1
+#define ROM_CNTL__READ_MODE_OVERRIDE__SHIFT 0x3
+#define ROM_CNTL__SPI_TIMING_RELAX_SCK__SHIFT 0x4
+#define ROM_CNTL__SPI_TIMING_RELAX_SCK_OVERRIDE__SHIFT 0x5
+#define ROM_CNTL__FOUR_BYTE_ADDRESS_MODE__SHIFT 0x6
+#define ROM_CNTL__DUMMY_CYCLE_NUM__SHIFT 0x8
+#define ROM_CNTL__SPI_TIMING_RELAX__SHIFT 0x13
+#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE__SHIFT 0x14
+#define ROM_CNTL__SPI_FAST_MODE__SHIFT 0x15
+#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE__SHIFT 0x16
+#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x17
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE__SHIFT 0x1c
+#define ROM_CNTL__ROM_INDEX_ADDRESS_AUTO_INCREASE__SHIFT 0x1d
+#define ROM_CNTL__PAD_SAMPLE_MODE__SHIFT 0x1e
+#define ROM_CNTL__PAD_SAMPLE_MODE_OVERRIDE__SHIFT 0x1f
+#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x00000001L
+#define ROM_CNTL__SPI_TIMING_RELAX_MASK 0x00080000L
+#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE_MASK 0x00100000L
+#define ROM_CNTL__SPI_FAST_MODE_MASK 0x00200000L
+#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE_MASK 0x00400000L
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0x0F800000L
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE_MASK 0x10000000L
+//PAGE_MIRROR_CNTL
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x1c
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0x01FFFFFFL
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x02000000L
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0x0C000000L
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x10000000L
+//ROM_STATUS
+#define ROM_STATUS__ROM_BUSY__SHIFT 0x0
+#define ROM_STATUS__ROM_BUSY_MASK 0x00000001L
+//CGTT_ROM_CLK_CTRL0
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+//ROM_INDEX
+#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
+#define ROM_INDEX__ROM_INDEX_MASK 0x01FFFFFFL
+//ROM_DATA
+#define ROM_DATA__ROM_DATA__SHIFT 0x0
+#define ROM_DATA__ROM_DATA_MASK 0xFFFFFFFFL
+//ROM_START
+#define ROM_START__ROM_START__SHIFT 0x0
+#define ROM_START__ROM_START_MASK 0x01FFFFFFL
+//ROM_SW_CNTL
+#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0
+#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x13
+#define ROM_SW_CNTL__DATA_SIZE_MASK 0x0000FFFFL
+#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x00070000L
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x00080000L
+//ROM_SW_STATUS
+#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0
+#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x00000001L
+//ROM_SW_COMMAND
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0x000000FFL
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xFFFFFF00L
+//ROM_SW_DATA_1
+#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_2
+#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_3
+#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_4
+#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_5
+#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_6
+#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_7
+#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_8
+#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_9
+#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_10
+#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_11
+#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_12
+#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_13
+#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_14
+#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_15
+#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_16
+#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_17
+#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_18
+#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_19
+#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_20
+#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_21
+#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_22
+#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_23
+#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_24
+#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_25
+#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_26
+#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_27
+#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_28
+#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_29
+#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_30
+#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_31
+#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_32
+#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_33
+#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_34
+#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_35
+#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_36
+#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_37
+#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_38
+#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_39
+#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_40
+#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_41
+#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_42
+#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_43
+#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_44
+#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_45
+#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_46
+#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_47
+#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_48
+#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_49
+#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_50
+#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_51
+#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_52
+#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_53
+#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_54
+#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_55
+#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_56
+#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_57
+#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_58
+#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_59
+#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_60
+#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_61
+#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_62
+#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_63
+#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//ROM_SW_DATA_64
+#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: smuio_smuio_gpio_SmuSmuioDec
+//SMU_GPIOPAD_SW_INT_STAT
+#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L
+//SMU_GPIOPAD_MASK
+#define SMU_GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
+#define SMU_GPIOPAD_MASK__GPIO_MASK_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_A
+#define SMU_GPIOPAD_A__GPIO_A__SHIFT 0x0
+#define SMU_GPIOPAD_A__GPIO_A_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_TXIMPSEL
+#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL__SHIFT 0x0
+#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_EN
+#define SMU_GPIOPAD_EN__GPIO_EN__SHIFT 0x0
+#define SMU_GPIOPAD_EN__GPIO_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_Y
+#define SMU_GPIOPAD_Y__GPIO_Y__SHIFT 0x0
+#define SMU_GPIOPAD_Y__GPIO_Y_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_RXEN
+#define SMU_GPIOPAD_RXEN__GPIO_RXEN__SHIFT 0x0
+#define SMU_GPIOPAD_RXEN__GPIO_RXEN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_RCVR_SEL0
+#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0__SHIFT 0x0
+#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_RCVR_SEL1
+#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1__SHIFT 0x0
+#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_PU_EN
+#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
+#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_PD_EN
+#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
+#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_PINSTRAPS
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L
+//DFT_PINSTRAPS
+#define DFT_PINSTRAPS__DFT_PINSTRAPS__SHIFT 0x0
+#define DFT_PINSTRAPS__DFT_PINSTRAPS_MASK 0x000000FFL
+//SMU_GPIOPAD_INT_STAT_EN
+#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
+#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L
+//SMU_GPIOPAD_INT_STAT
+#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L
+//SMU_GPIOPAD_INT_STAT_AK
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
+#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L
+#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L
+//SMU_GPIOPAD_INT_EN
+#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
+#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L
+//SMU_GPIOPAD_INT_TYPE
+#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
+#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L
+//SMU_GPIOPAD_INT_POLARITY
+#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
+#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L
+//SMUIO_PCC_GPIO_SELECT
+#define SMUIO_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
+#define SMUIO_PCC_GPIO_SELECT__GPIO_MASK 0xFFFFFFFFL
+//SMU_GPIOPAD_S0
+#define SMU_GPIOPAD_S0__GPIO_S0__SHIFT 0x0
+#define SMU_GPIOPAD_S0__GPIO_S0_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_S1
+#define SMU_GPIOPAD_S1__GPIO_S1__SHIFT 0x0
+#define SMU_GPIOPAD_S1__GPIO_S1_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_SCHMEN
+#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN__SHIFT 0x0
+#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_SCL_EN
+#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN__SHIFT 0x0
+#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_SDA_EN
+#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN__SHIFT 0x0
+#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN_MASK 0x7FFFFFFFL
+//SMUIO_GPIO_INT0_SELECT
+#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT_MASK 0xFFFFFFFFL
+//SMUIO_GPIO_INT1_SELECT
+#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT_MASK 0xFFFFFFFFL
+//SMUIO_GPIO_INT2_SELECT
+#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT_MASK 0xFFFFFFFFL
+//SMUIO_GPIO_INT3_SELECT
+#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT_MASK 0xFFFFFFFFL
+//SMU_GPIOPAD_MP_INT0_STAT
+#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT_MASK 0x1FFFFFFFL
+//SMU_GPIOPAD_MP_INT1_STAT
+#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT_MASK 0x1FFFFFFFL
+//SMU_GPIOPAD_MP_INT2_STAT
+#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT_MASK 0x1FFFFFFFL
+//SMU_GPIOPAD_MP_INT3_STAT
+#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT_MASK 0x1FFFFFFFL
+//SMIO_INDEX
+#define SMIO_INDEX__SW_SMIO_INDEX__SHIFT 0x0
+#define SMIO_INDEX__SW_SMIO_INDEX_MASK 0x00000001L
+//S0_VID_SMIO_CNTL
+#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES__SHIFT 0x0
+#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES_MASK 0xFFFFFFFFL
+//S1_VID_SMIO_CNTL
+#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES__SHIFT 0x0
+#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES_MASK 0xFFFFFFFFL
+//OPEN_DRAIN_SELECT
+#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT__SHIFT 0x0
+#define OPEN_DRAIN_SELECT__RESERVED__SHIFT 0x1f
+#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT_MASK 0x7FFFFFFFL
+#define OPEN_DRAIN_SELECT__RESERVED_MASK 0x80000000L
+//SMIO_ENABLE
+#define SMIO_ENABLE__SMIO_ENABLE__SHIFT 0x0
+#define SMIO_ENABLE__SMIO_ENABLE_MASK 0xFFFFFFFFL
+
+#endif
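
These auto-generated headers pair every register field with a FIELD__SHIFT bit offset and a FIELD_MASK bit mask. As a minimal sketch of the usual read-modify-write pattern built on such pairs, take ROM_CNTL's SCK_PRESCALE_REFCLK field as the example; the helpers below are illustrative and not part of this patch:

    /* Extract SCK_PRESCALE_REFCLK from a ROM_CNTL register value. */
    static inline uint32_t rom_cntl_get_prescale(uint32_t val)
    {
            return (val & ROM_CNTL__SCK_PRESCALE_REFCLK_MASK) >>
                   ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT;
    }

    /* Update the prescale field without disturbing the other bits. */
    static inline uint32_t rom_cntl_set_prescale(uint32_t val, uint32_t prescale)
    {
            val &= ~ROM_CNTL__SCK_PRESCALE_REFCLK_MASK;
            val |= (prescale << ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT) &
                   ROM_CNTL__SCK_PRESCALE_REFCLK_MASK;
            return val;
    }
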
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 32054ecf0b87..805c9d37a2b4 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -150,6 +150,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_VCN_POWER_STATE,
AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
+ AMDGPU_PP_SENSOR_VCN_LOAD,
};
enum amd_pp_task {
@@ -420,7 +421,7 @@ struct amd_pm_funcs {
int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
- bool (*get_asic_baco_capability)(void *handle);
+ int (*get_asic_baco_capability)(void *handle);
int (*get_asic_baco_state)(void *handle, int *state);
int (*set_asic_baco_state)(void *handle, int state);
int (*get_ppfeature_status)(void *handle, char *buf);
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index ec5b9ab67c5e..b72d5d362251 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -61,6 +61,7 @@ enum MES_SCH_API_OPCODE {
MES_SCH_API_MISC = 14,
MES_SCH_API_UPDATE_ROOT_PAGE_TABLE = 15,
MES_SCH_API_AMD_LOG = 16,
+ MES_SCH_API_SET_HW_RSRC_1 = 19,
MES_SCH_API_MAX = 0xFF
};
@@ -238,6 +239,26 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
};
+union MESAPI_SET_HW_RESOURCES_1 {
+ struct {
+ union MES_API_HEADER header;
+ struct MES_API_STATUS api_status;
+ uint64_t timestamp;
+ union {
+ struct {
+ uint32_t enable_mes_info_ctx : 1;
+ uint32_t reserved : 31;
+ };
+ uint32_t uint32_all;
+ };
+ uint64_t mes_info_ctx_mc_addr;
+ uint32_t mes_info_ctx_size;
+ uint32_t mes_kiq_unmap_timeout; // unit is 100ms
+ };
+
+ uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+};
+
union MESAPI__ADD_QUEUE {
struct {
union MES_API_HEADER header;
@@ -278,10 +299,21 @@ union MESAPI__ADD_QUEUE {
uint32_t skip_process_ctx_clear : 1;
uint32_t map_legacy_kq : 1;
uint32_t exclusively_scheduled : 1;
- uint32_t reserved : 17;
+ uint32_t is_long_running : 1;
+ uint32_t is_dwm_queue : 1;
+ uint32_t is_video_blit_queue : 1;
+ uint32_t reserved : 14;
};
- struct MES_API_STATUS api_status;
- uint64_t tma_addr;
+ struct MES_API_STATUS api_status;
+ uint64_t tma_addr;
+ uint32_t sch_id;
+ uint64_t timestamp;
+ uint32_t process_context_array_index;
+ uint32_t gang_context_array_index;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint32_t alignment_mode_setting;
+ uint64_t unmap_flag_addr;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
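
The new MESAPI_SET_HW_RESOURCES_1 packet lets the driver hand MES an info-context buffer and a KIQ unmap timeout. A hedged sketch of filling it in, assuming a caller that already owns a GPU buffer (the address/size parameters and the submission path are assumptions, not shown in the patch):

    /* Illustrative only: populate the SET_HW_RESOURCES_1 packet. */
    static void mes_fill_hw_resources_1(uint64_t info_ctx_gpu_addr,
                                        uint32_t info_ctx_size)
    {
            union MESAPI_SET_HW_RESOURCES_1 pkt = {0};

            pkt.enable_mes_info_ctx = 1;
            pkt.mes_info_ctx_mc_addr = info_ctx_gpu_addr;  /* assumed buffer */
            pkt.mes_info_ctx_size = info_ctx_size;
            pkt.mes_kiq_unmap_timeout = 10;  /* 10 * 100 ms = 1 s */
            /* ...fill pkt.header/pkt.api_status and submit on the MES ring... */
    }
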
diff --git a/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h b/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h
index beadb9e42850..ca83e9e5c3ff 100644
--- a/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h
+++ b/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h
@@ -234,7 +234,8 @@ union UMSCHAPI__SET_HW_RESOURCES {
uint32_t enable_level_process_quantum_check : 1;
uint32_t is_vcn0_enabled : 1;
uint32_t is_vcn1_enabled : 1;
- uint32_t reserved : 27;
+ uint32_t use_rs64mem_for_proc_ctx_csa : 1;
+ uint32_t reserved : 26;
};
uint32_t uint32_all;
};
@@ -297,9 +298,12 @@ union UMSCHAPI__ADD_QUEUE {
struct {
uint32_t is_context_suspended : 1;
- uint32_t reserved : 31;
+ uint32_t collaboration_mode : 1;
+ uint32_t reserved : 30;
};
struct UMSCH_API_STATUS api_status;
+ uint32_t process_csa_array_index;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -314,6 +318,7 @@ union UMSCHAPI__REMOVE_QUEUE {
uint64_t context_csa_addr;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -337,6 +342,7 @@ union UMSCHAPI__SUSPEND {
uint32_t suspend_fence_value;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -356,6 +362,7 @@ union UMSCHAPI__RESUME {
enum UMSCH_ENGINE_TYPE engine_type;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -404,6 +411,7 @@ union UMSCHAPI__UPDATE_AFFINITY {
union UMSCH_AFFINITY affinity;
uint64_t context_csa_addr;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -417,6 +425,7 @@ union UMSCHAPI__CHANGE_CONTEXT_PRIORITY_LEVEL {
uint64_t context_quantum;
uint64_t context_csa_addr;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index f84bfed50681..eee919577b44 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -199,14 +199,14 @@ int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
return ret;
}
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
- bool ret;
+ int ret;
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
- return false;
+ return 0;
/* Don't use baco for reset in S3.
* This is a workaround for some platforms
* where entering BACO during suspend
@@ -217,7 +217,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
* devices. Needs more investigation.
*/
if (adev->in_s3)
- return false;
+ return 0;
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f09b9d49297e..c11952a4389b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -38,6 +38,8 @@
#define MAX_NUM_OF_FEATURES_PER_SUBSET 8
#define MAX_NUM_OF_SUBSETS 8
+#define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name)
+
struct od_attribute {
struct kobj_attribute attribute;
struct list_head entry;
@@ -1582,6 +1584,30 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
}
/**
+ * DOC: vcn_busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the VCN
+ * is as a percentage. The file vcn_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ unsigned int value;
+ int r;
+
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+/**
* DOC: pcie_bw
*
* The amdgpu driver provides a sysfs API for estimating how much data
@@ -2091,6 +2117,99 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
return 0;
}
+static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ enum amdgpu_device_attr_id attr_id = attr->attr_id;
+ uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+
+ *states = ATTR_STATE_SUPPORTED;
+
+ if (!(attr->flags & mask)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+ if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+ if (gc_ver < IP_VERSION(9, 0, 0))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ if (mp1_ver < IP_VERSION(10, 0, 0))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(10, 1, 2) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(9, 4, 3)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
+ if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(10, 1, 2) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(9, 4, 3)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
+ if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+ if (gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver == IP_VERSION(9, 4, 3))
+ *states = ATTR_STATE_UNSUPPORTED;
+ }
+
+ switch (gc_ver) {
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ /* the Mi series card does not support standalone mclk/socclk/fclk level setting */
+ if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+ DEVICE_ATTR_IS(pp_dpm_socclk) ||
+ DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* setting should not be allowed from VF if not in one VF mode */
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+
+ return 0;
+}
+
/* Following items will be read out to indicate current plpd policy:
* - -1: none
* - 0: disallow
@@ -2162,17 +2281,26 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
.attr_update = pp_dpm_dcefclk_attr_update),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2180,6 +2308,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
.attr_update = pp_od_clk_voltage_attr_update),
AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2201,28 +2330,28 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
uint32_t mask, enum amdgpu_device_attr_states *states)
{
struct device_attribute *dev_attr = &attr->dev_attr;
- uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ enum amdgpu_device_attr_id attr_id = attr->attr_id;
uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
- const char *attr_name = dev_attr->attr.name;
if (!(attr->flags & mask)) {
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
-#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
-
- if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
- if (gc_ver < IP_VERSION(9, 0, 0))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
- if (mp1_ver < IP_VERSION(10, 0, 0))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+ if (DEVICE_ATTR_IS(mem_busy_percent)) {
if ((adev->flags & AMD_IS_APU &&
gc_ver != IP_VERSION(9, 4, 3)) ||
gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0)))
+ *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
if (adev->flags & AMD_IS_APU ||
@@ -2253,36 +2382,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
if (gc_ver < IP_VERSION(9, 1, 0))
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
- if (!(gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(10, 1, 2) ||
- gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3) ||
- gc_ver == IP_VERSION(9, 4, 3)))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
- if (!((gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
- if (!(gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(10, 1, 2) ||
- gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3) ||
- gc_ver == IP_VERSION(9, 4, 3)))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
- if (!((gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
- *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
@@ -2304,23 +2403,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
-EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
- if (gc_ver == IP_VERSION(9, 4, 2) ||
- gc_ver == IP_VERSION(9, 4, 3))
- *states = ATTR_STATE_UNSUPPORTED;
}
switch (gc_ver) {
- case IP_VERSION(9, 4, 1):
- case IP_VERSION(9, 4, 2):
- /* the Mi series card does not support standalone mclk/socclk/fclk level setting */
- if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
- DEVICE_ATTR_IS(pp_dpm_socclk) ||
- DEVICE_ATTR_IS(pp_dpm_fclk)) {
- dev_attr->attr.mode &= ~S_IWUGO;
- dev_attr->store = NULL;
- }
- break;
case IP_VERSION(10, 3, 0):
if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
amdgpu_sriov_vf(adev)) {
@@ -2332,14 +2417,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
break;
}
- /* setting should not be allowed from VF if not in one VF mode */
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
- dev_attr->attr.mode &= ~S_IWUGO;
- dev_attr->store = NULL;
- }
-
-#undef DEVICE_ATTR_IS
-
return 0;
}
@@ -4261,6 +4338,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
}
}
+ /*
+ * If gpu_od is the only member in the list, that means gpu_od is an
+ * empty directory, so remove it.
+ */
+ if (list_is_singular(&adev->pm.od_kobj_list))
+ goto err_out;
+
return 0;
err_out:
@@ -4322,6 +4406,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
ret = amdgpu_od_set_init(adev);
if (ret)
goto err_out1;
+ } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
+ dev_info(adev->dev, "overdrive feature is not supported\n");
}
adev->pm.sysfs_initialized = true;
@@ -4429,6 +4515,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* MEM Load */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
seq_printf(m, "MEM Load: %u %%\n", value);
+ /* VCN Load */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
+ seq_printf(m, "VCN Load: %u %%\n", value);
seq_printf(m, "\n");
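
The VCN load is exposed both through the new vcn_busy_percent sysfs file and in the debugfs pm_info dump above. A small userspace sketch for the sysfs side; the card0 path is an assumption, since the card index varies per system:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed path; adjust the card index for your system. */
            FILE *f = fopen("/sys/class/drm/card0/device/vcn_busy_percent", "r");
            unsigned int busy;

            if (!f || fscanf(f, "%u", &busy) != 1) {
                    if (f)
                            fclose(f);
                    perror("vcn_busy_percent");
                    return 1;
            }
            fclose(f);
            printf("VCN load: %u%%\n", busy);
            return 0;
    }
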
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 621200e0823f..501f8c726e8d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -50,8 +50,12 @@ enum amdgpu_runpm_mode {
AMDGPU_RUNPM_PX,
AMDGPU_RUNPM_BOCO,
AMDGPU_RUNPM_BACO,
+ AMDGPU_RUNPM_BAMACO,
};
+#define BACO_SUPPORT (1<<0)
+#define MACO_SUPPORT (1<<1)
+
struct amdgpu_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
@@ -407,7 +411,7 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev);
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
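
Widening amdgpu_dpm_is_baco_supported() from bool to int lets it report the BACO_SUPPORT/MACO_SUPPORT bitmask rather than a plain yes/no, which is what makes the new AMDGPU_RUNPM_BAMACO mode selectable. A hedged sketch of how a caller might map the mask to a runpm mode (the helper is illustrative, and AMDGPU_RUNPM_NONE is assumed to be the enum's fallback member):

    static enum amdgpu_runpm_mode pick_runpm_mode(struct amdgpu_device *adev)
    {
            int caps = amdgpu_dpm_is_baco_supported(adev);

            if (caps & MACO_SUPPORT)
                    return AMDGPU_RUNPM_BAMACO;  /* BACO plus MACO */
            if (caps & BACO_SUPPORT)
                    return AMDGPU_RUNPM_BACO;
            return AMDGPU_RUNPM_NONE;            /* assumed fallback */
    }
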
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
index eec816f0cbf9..448ba3a14584 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
@@ -43,8 +43,48 @@ enum amdgpu_device_attr_states {
ATTR_STATE_SUPPORTED,
};
+enum amdgpu_device_attr_id {
+ device_attr_id__unknown = -1,
+ device_attr_id__power_dpm_state = 0,
+ device_attr_id__power_dpm_force_performance_level,
+ device_attr_id__pp_num_states,
+ device_attr_id__pp_cur_state,
+ device_attr_id__pp_force_state,
+ device_attr_id__pp_table,
+ device_attr_id__pp_dpm_sclk,
+ device_attr_id__pp_dpm_mclk,
+ device_attr_id__pp_dpm_socclk,
+ device_attr_id__pp_dpm_fclk,
+ device_attr_id__pp_dpm_vclk,
+ device_attr_id__pp_dpm_vclk1,
+ device_attr_id__pp_dpm_dclk,
+ device_attr_id__pp_dpm_dclk1,
+ device_attr_id__pp_dpm_dcefclk,
+ device_attr_id__pp_dpm_pcie,
+ device_attr_id__pp_sclk_od,
+ device_attr_id__pp_mclk_od,
+ device_attr_id__pp_power_profile_mode,
+ device_attr_id__pp_od_clk_voltage,
+ device_attr_id__gpu_busy_percent,
+ device_attr_id__mem_busy_percent,
+ device_attr_id__vcn_busy_percent,
+ device_attr_id__pcie_bw,
+ device_attr_id__pp_features,
+ device_attr_id__unique_id,
+ device_attr_id__thermal_throttling_logging,
+ device_attr_id__apu_thermal_cap,
+ device_attr_id__gpu_metrics,
+ device_attr_id__smartshift_apu_power,
+ device_attr_id__smartshift_dgpu_power,
+ device_attr_id__smartshift_bias,
+ device_attr_id__xgmi_plpd_policy,
+ device_attr_id__pm_metrics,
+ device_attr_id__count,
+};
+
struct amdgpu_device_attr {
struct device_attribute dev_attr;
+ enum amdgpu_device_attr_id attr_id;
enum amdgpu_device_attr_flags flags;
int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states);
@@ -61,6 +101,7 @@ struct amdgpu_device_attr_entry {
#define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .attr_id = device_attr_id__##_name, \
.flags = _flags, \
##__VA_ARGS__, }
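
The token pasting in __AMDGPU_DEVICE_ATTR stamps each attribute with its device_attr_id__<name> enumerator, so the reworked DEVICE_ATTR_IS() is an integer compare instead of the old strcmp() on the sysfs name. A minimal standalone demo of the pattern, with simplified types that are not the kernel structs:

    enum attr_id { id__foo, id__bar };

    struct attr { const char *name; enum attr_id id; };

    /* Paste the same token into both the name string and the id. */
    #define MAKE_ATTR(_name)   { .name = #_name, .id = id__##_name }
    #define ATTR_IS(_a, _name) ((_a)->id == id__##_name)

    static struct attr attrs[] = { MAKE_ATTR(foo), MAKE_ATTR(bar) };
    /* ATTR_IS(&attrs[0], foo) evaluates to 1 with no string comparison. */
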
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 5cb4725c773f..6bb42d04b247 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3316,6 +3316,8 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.soft_reset = kv_dpm_soft_reset,
.set_clockgating_state = kv_dpm_set_clockgating_state,
.set_powergating_state = kv_dpm_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version kv_smu_ip_block = {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index eb4da3666e05..f245fc0bc6d3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -8060,6 +8060,8 @@ static const struct amd_ip_funcs si_dpm_ip_funcs = {
.soft_reset = si_dpm_soft_reset,
.set_clockgating_state = si_dpm_set_clockgating_state,
.set_powergating_state = si_dpm_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version si_smu_ip_block =
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index aed0e2cefbf9..5fb21a0508cd 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -302,6 +302,8 @@ static const struct amd_ip_funcs pp_ip_funcs = {
.soft_reset = pp_sw_reset,
.set_clockgating_state = pp_set_clockgating_state,
.set_powergating_state = pp_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version pp_smu_ip_block =
@@ -1371,7 +1373,7 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
return phm_set_active_display_count(hwmgr, count);
}
-static bool pp_get_asic_baco_capability(void *handle)
+static int pp_get_asic_baco_capability(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
@@ -1379,10 +1381,10 @@ static bool pp_get_asic_baco_capability(void *handle)
return false;
if (!(hwmgr->not_vf && amdgpu_dpm) ||
- !hwmgr->hwmgr_func->get_asic_baco_capability)
+ !hwmgr->hwmgr_func->get_bamaco_support)
return false;
- return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr);
+ return hwmgr->hwmgr_func->get_bamaco_support(hwmgr);
}
static int pp_get_asic_baco_state(void *handle, int *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
index e8a9471c1898..ad60918aaae1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
@@ -33,7 +33,7 @@
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
-bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr)
+int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
@@ -44,9 +44,9 @@ bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr)
reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
- return true;
+ return BACO_SUPPORT;
- return false;
+ return 0;
}
int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
index 73a773f4ce2e..750082ea74d8 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr);
+extern int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr);
extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index aa91730e4eaf..1fcd4451001f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5791,7 +5791,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
- .get_asic_baco_capability = smu7_baco_get_capability,
+ .get_bamaco_support = smu7_get_bamaco_support,
.get_asic_baco_state = smu7_baco_get_state,
.set_asic_baco_state = smu7_baco_set_state,
.power_off_asic = smu7_power_off_asic,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
index c66ef9741535..c1ce1d7cae48 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
@@ -28,13 +28,13 @@
#include "vega10_inc.h"
#include "smu9_baco.h"
-bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr)
+int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg, data;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return false;
+ return 0;
WREG32(0x12074, 0xFFF0003B);
data = RREG32(0x12075);
@@ -43,10 +43,10 @@ bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr)
reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- return true;
+ return BACO_SUPPORT;
}
- return false;
+ return 0;
}
int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
index 9ff7c2ea1b58..2c100482084c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr);
+extern int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr);
extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 6d6bc6a380b3..9f5bd998c6bf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -5756,7 +5756,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.set_power_limit = vega10_set_power_limit,
.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
.get_performance_level = vega10_get_performance_level,
- .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_bamaco_support = smu9_get_bamaco_support,
.get_asic_baco_state = smu9_baco_get_state,
.set_asic_baco_state = vega10_baco_set_state,
.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index 460067933de2..c223e3a6bfca 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -2966,7 +2966,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.start_thermal_controller = vega12_start_thermal_controller,
.powergate_gfx = vega12_gfx_off_control,
.get_performance_level = vega12_get_performance_level,
- .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_bamaco_support = smu9_get_bamaco_support,
.get_asic_baco_state = smu9_baco_get_state,
.set_asic_baco_state = vega12_baco_set_state,
.get_ppfeature_status = vega12_get_ppfeature_status,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
index dad4c80aee58..424e4ec9e389 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
@@ -36,22 +36,22 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = {
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
};
-bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr)
+int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return false;
+ return 0;
if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) {
reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- return true;
+ return BACO_SUPPORT;
}
- return false;
+ return 0;
}
int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
index bdad9c915631..0f2dd8c008ba 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr);
+extern int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr);
extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 3b33af30eb0f..f9efb0bad807 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -4422,7 +4422,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.notify_cac_buffer_info = vega20_notify_cac_buffer_info,
.enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
/* BACO related */
- .get_asic_baco_capability = vega20_baco_get_capability,
+ .get_bamaco_support = vega20_get_bamaco_support,
.get_asic_baco_state = vega20_baco_get_state,
.set_asic_baco_state = vega20_baco_set_state,
.set_mp1_state = vega20_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 6f536159df4d..69928a4a074b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -351,7 +351,7 @@ struct pp_hwmgr_func {
int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
- bool (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr);
+ int (*get_bamaco_support)(struct pp_hwmgr *hwmgr);
int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 246b211b1e85..7789b313285c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -45,6 +45,7 @@
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
+#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"
/*
@@ -715,6 +716,10 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 1):
smu_v14_0_0_set_ppt_funcs(smu);
break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ smu_v14_0_2_set_ppt_funcs(smu);
+ break;
default:
return -EINVAL;
}
@@ -735,8 +740,9 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.platform_support = false;
+ smu->smu_baco.maco_support = false;
smu->user_dpm_profile.fan_mode = -1;
mutex_init(&smu->message_lock);
@@ -1966,10 +1972,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
+static int smu_reset_mp1_state(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if ((!adev->in_runpm) && (!adev->in_suspend) &&
+ (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(adev))
+ ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+
+ return ret;
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1987,7 +2008,15 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- return smu_smc_hw_cleanup(smu);
+ ret = smu_smc_hw_cleanup(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_reset_mp1_state(smu);
+ if (ret)
+ return ret;
+
+ return 0;
}
static void smu_late_fini(void *handle)
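With this change smu_hw_fini() fails fast: an error from smu_smc_hw_cleanup() is returned before the MP1 state reset is attempted, and the PP_MP1_STATE_UNLOAD message itself is only sent for headless MP1 13.0.10 parts outside of runtime-PM, suspend, and reset paths. The gate condenses to a single predicate, sketched here with the same conditions inlined:

/* Sketch: mirrors the conditions in smu_reset_mp1_state() above. */
static bool smu_needs_mp1_unload(struct amdgpu_device *adev)
{
	return !adev->in_runpm && !adev->in_suspend &&
	       !amdgpu_in_reset(adev) &&
	       amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
	       !amdgpu_device_has_display_hardware(adev);
}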
@@ -3200,17 +3229,17 @@ static int smu_set_xgmi_pstate(void *handle,
return ret;
}
-static bool smu_get_baco_capability(void *handle)
+static int smu_get_baco_capability(void *handle)
{
struct smu_context *smu = handle;
if (!smu->pm_enabled)
return false;
- if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support)
+ if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
return false;
- return smu->ppt_funcs->baco_is_support(smu);
+ return smu->ppt_funcs->get_bamaco_support(smu);
}
static int smu_baco_set_state(void *handle, int state)
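Widening the return type keeps legacy truthiness checks working (any nonzero mask still reads as "supported") while letting newer callers tell the two modes apart. A hedged sketch, reusing the assumed BACO_SUPPORT/MACO_SUPPORT bits from the vega20 example above:

static void smu_report_baco_modes(void *handle)
{
	int caps = smu_get_baco_capability(handle);

	if (!caps)
		return; /* neither BACO nor MACO available */
	if (caps & MACO_SUPPORT) /* assumed flag value, see above */
		pr_debug("MACO (memory active, chip off) supported\n");
	else
		pr_debug("BACO (bus active, chip off) supported\n");
}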
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index a870bdd49a4e..0917dec8efe3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -424,6 +424,7 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
+ SMU_BACO_STATE_NONE,
};
struct smu_baco_context {
@@ -458,7 +459,7 @@ struct smu_umd_pstate_table {
struct cmn2asic_msg_mapping {
int valid_mapping;
int map_to;
- int valid_in_vf;
+ uint32_t flags;
};
struct cmn2asic_mapping {
@@ -538,6 +539,7 @@ struct smu_context {
uint32_t smc_driver_if_version;
uint32_t smc_fw_if_version;
uint32_t smc_fw_version;
+ uint32_t smc_fw_caps;
bool uploading_custom_pp_table;
bool dc_controlled_by_gpio;
@@ -1173,9 +1175,11 @@ struct pptable_funcs {
int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
/**
- * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off).
+ * @get_bamaco_support: Check if GPU supports BACO/MACO
+ * BACO: Bus Active, Chip Off
+ * MACO: Memory Active, Chip Off
*/
- bool (*baco_is_support)(struct smu_context *smu);
+ int (*get_bamaco_support)(struct smu_context *smu);
/**
* @baco_get_state: Get the current BACO state.
@@ -1482,8 +1486,8 @@ enum smu_baco_seq {
BACO_SEQ_COUNT,
};
-#define MSG_MAP(msg, index, valid_in_vf) \
- [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
+#define MSG_MAP(msg, index, flags) \
+ [SMU_MSG_##msg] = {1, (index), (flags)}
#define CLK_MAP(clk, index) \
[SMU_##clk] = {1, (index)}
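Since the third MSG_MAP() argument is now a flags word rather than a bare valid-in-VF bool, one mapping entry can carry several per-message attributes. A sketch of the intended usage; SMU_MSG_VF_FLAG is a hypothetical bit name used purely for illustration:

/* Hypothetical flag bit, standing in for whatever this series defines. */
#define SMU_MSG_VF_FLAG (1U << 0) /* message may be sent from a VF */

static const struct cmn2asic_msg_mapping example_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,   PPSMC_MSG_TestMessage,   SMU_MSG_VF_FLAG),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
};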
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
new file mode 100644
index 000000000000..97a29b80fb13
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
@@ -0,0 +1,1836 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU14_DRIVER_IF_V14_0_H
+#define SMU14_DRIVER_IF_V14_0_H
+
+//Increment this version if SkuTable_t or BoardTable_t change
+#define PPTABLE_VERSION 0x18
+
+#define NUM_GFXCLK_DPM_LEVELS 16
+#define NUM_SOCCLK_DPM_LEVELS 8
+#define NUM_MP0CLK_DPM_LEVELS 2
+#define NUM_DCLK_DPM_LEVELS 8
+#define NUM_VCLK_DPM_LEVELS 8
+#define NUM_DISPCLK_DPM_LEVELS 8
+#define NUM_DPPCLK_DPM_LEVELS 8
+#define NUM_DPREFCLK_DPM_LEVELS 8
+#define NUM_DCFCLK_DPM_LEVELS 8
+#define NUM_DTBCLK_DPM_LEVELS 8
+#define NUM_UCLK_DPM_LEVELS 6
+#define NUM_LINK_LEVELS 3
+#define NUM_FCLK_DPM_LEVELS 8
+#define NUM_OD_FAN_MAX_POINTS 6
+
+// Feature Control Defines
+#define FEATURE_FW_DATA_READ_BIT 0
+#define FEATURE_DPM_GFXCLK_BIT 1
+#define FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT 2
+#define FEATURE_DPM_UCLK_BIT 3
+#define FEATURE_DPM_FCLK_BIT 4
+#define FEATURE_DPM_SOCCLK_BIT 5
+#define FEATURE_DPM_LINK_BIT 6
+#define FEATURE_DPM_DCN_BIT 7
+#define FEATURE_VMEMP_SCALING_BIT 8
+#define FEATURE_VDDIO_MEM_SCALING_BIT 9
+#define FEATURE_DS_GFXCLK_BIT 10
+#define FEATURE_DS_SOCCLK_BIT 11
+#define FEATURE_DS_FCLK_BIT 12
+#define FEATURE_DS_LCLK_BIT 13
+#define FEATURE_DS_DCFCLK_BIT 14
+#define FEATURE_DS_UCLK_BIT 15
+#define FEATURE_GFX_ULV_BIT 16
+#define FEATURE_FW_DSTATE_BIT 17
+#define FEATURE_GFXOFF_BIT 18
+#define FEATURE_BACO_BIT 19
+#define FEATURE_MM_DPM_BIT 20
+#define FEATURE_SOC_MPCLK_DS_BIT 21
+#define FEATURE_BACO_MPCLK_DS_BIT 22
+#define FEATURE_THROTTLERS_BIT 23
+#define FEATURE_SMARTSHIFT_BIT 24
+#define FEATURE_GTHR_BIT 25
+#define FEATURE_ACDC_BIT 26
+#define FEATURE_VR0HOT_BIT 27
+#define FEATURE_FW_CTF_BIT 28
+#define FEATURE_FAN_CONTROL_BIT 29
+#define FEATURE_GFX_DCS_BIT 30
+#define FEATURE_GFX_READ_MARGIN_BIT 31
+#define FEATURE_LED_DISPLAY_BIT 32
+#define FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT 33
+#define FEATURE_OUT_OF_BAND_MONITOR_BIT 34
+#define FEATURE_OPTIMIZED_VMIN_BIT 35
+#define FEATURE_GFX_IMU_BIT 36
+#define FEATURE_BOOT_TIME_CAL_BIT 37
+#define FEATURE_GFX_PCC_DFLL_BIT 38
+#define FEATURE_SOC_CG_BIT 39
+#define FEATURE_DF_CSTATE_BIT 40
+#define FEATURE_GFX_EDC_BIT 41
+#define FEATURE_BOOT_POWER_OPT_BIT 42
+#define FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT 43
+#define FEATURE_DS_VCN_BIT 44
+#define FEATURE_BACO_CG_BIT 45
+#define FEATURE_MEM_TEMP_READ_BIT 46
+#define FEATURE_ATHUB_MMHUB_PG_BIT 47
+#define FEATURE_SOC_PCC_BIT 48
+#define FEATURE_EDC_PWRBRK_BIT 49
+#define FEATURE_SOC_EDC_XVMIN_BIT 50
+#define FEATURE_GFX_PSM_DIDT_BIT 51
+#define FEATURE_APT_ALL_ENABLE_BIT 52
+#define FEATURE_APT_SQ_THROTTLE_BIT 53
+#define FEATURE_APT_PF_DCS_BIT 54
+#define FEATURE_GFX_EDC_XVMIN_BIT 55
+#define FEATURE_GFX_DIDT_XVMIN_BIT 56
+#define FEATURE_FAN_ABNORMAL_BIT 57
+#define FEATURE_CLOCK_STRETCH_COMPENSATOR 58
+#define FEATURE_SPARE_59_BIT 59
+#define FEATURE_SPARE_60_BIT 60
+#define FEATURE_SPARE_61_BIT 61
+#define FEATURE_SPARE_62_BIT 62
+#define FEATURE_SPARE_63_BIT 63
+#define NUM_FEATURES 64
+
+#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
+#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
+ (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+ (1 << FEATURE_DPM_UCLK_BIT) | \
+ (1 << FEATURE_DPM_FCLK_BIT) | \
+ (1 << FEATURE_DPM_SOCCLK_BIT) | \
+ (1 << FEATURE_DPM_LINK_BIT) | \
+ (1 << FEATURE_DPM_DCN_BIT) | \
+ (1 << FEATURE_DS_GFXCLK_BIT) | \
+ (1 << FEATURE_DS_SOCCLK_BIT) | \
+ (1 << FEATURE_DS_FCLK_BIT) | \
+ (1 << FEATURE_DS_LCLK_BIT) | \
+ (1 << FEATURE_DS_DCFCLK_BIT) | \
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT)
+
+
+//For use with feature control messages
+typedef enum {
+ FEATURE_PWR_ALL,
+ FEATURE_PWR_S5,
+ FEATURE_PWR_BACO,
+ FEATURE_PWR_SOC,
+ FEATURE_PWR_GFX,
+ FEATURE_PWR_DOMAIN_COUNT,
+} FEATURE_PWR_DOMAIN_e;
+
+//For use with feature control + BTC save restore
+typedef enum {
+ FEATURE_BTC_NOP,
+ FEATURE_BTC_SAVE,
+ FEATURE_BTC_RESTORE,
+ FEATURE_BTC_COUNT,
+} FEATURE_BTC_e;
+
+// Debug Overrides Bitmask
+#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001
+#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
+#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
+#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
+#define DEBUG_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00000010
+#define DEBUG_OVERRIDE_DISABLE_VCN_PG 0x00000020
+#define DEBUG_OVERRIDE_DISABLE_FMAX_VMAX 0x00000040
+#define DEBUG_OVERRIDE_DISABLE_IMU_FW_CHECKS 0x00000080
+#define DEBUG_OVERRIDE_DISABLE_D0i2_REENTRY_HSR_TIMER_CHECK 0x00000100
+#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200
+#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400
+#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800
+#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000
+#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000
+#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000
+#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000
+
+// VR Mapping Bit Defines
+#define VR_MAPPING_VR_SELECT_MASK 0x01
+#define VR_MAPPING_VR_SELECT_SHIFT 0x00
+
+#define VR_MAPPING_PLANE_SELECT_MASK 0x02
+#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01
+
+// PSI Bit Defines
+#define PSI_SEL_VR0_PLANE0_PSI0 0x01
+#define PSI_SEL_VR0_PLANE0_PSI1 0x02
+#define PSI_SEL_VR0_PLANE1_PSI0 0x04
+#define PSI_SEL_VR0_PLANE1_PSI1 0x08
+#define PSI_SEL_VR1_PLANE0_PSI0 0x10
+#define PSI_SEL_VR1_PLANE0_PSI1 0x20
+#define PSI_SEL_VR1_PLANE1_PSI0 0x40
+#define PSI_SEL_VR1_PLANE1_PSI1 0x80
+
+typedef enum {
+ SVI_PSI_0, // Full phase count (default)
+ SVI_PSI_1, // Phase count 1st level
+ SVI_PSI_2, // Phase count 2nd level
+ SVI_PSI_3, // Single phase operation + active diode emulation
+ SVI_PSI_4, // Single phase operation + passive diode emulation *optional*
+ SVI_PSI_5, // Reserved
+ SVI_PSI_6, // Power down to 0V (voltage regulation disabled)
+ SVI_PSI_7, // Automated phase shedding and diode emulation
+} SVI_PSI_e;
+
+// Throttler Control/Status Bits
+#define THROTTLER_TEMP_EDGE_BIT 0
+#define THROTTLER_TEMP_HOTSPOT_BIT 1
+#define THROTTLER_TEMP_HOTSPOT_GFX_BIT 2
+#define THROTTLER_TEMP_HOTSPOT_SOC_BIT 3
+#define THROTTLER_TEMP_MEM_BIT 4
+#define THROTTLER_TEMP_VR_GFX_BIT 5
+#define THROTTLER_TEMP_VR_SOC_BIT 6
+#define THROTTLER_TEMP_VR_MEM0_BIT 7
+#define THROTTLER_TEMP_VR_MEM1_BIT 8
+#define THROTTLER_TEMP_LIQUID0_BIT 9
+#define THROTTLER_TEMP_LIQUID1_BIT 10
+#define THROTTLER_TEMP_PLX_BIT 11
+#define THROTTLER_TDC_GFX_BIT 12
+#define THROTTLER_TDC_SOC_BIT 13
+#define THROTTLER_PPT0_BIT 14
+#define THROTTLER_PPT1_BIT 15
+#define THROTTLER_PPT2_BIT 16
+#define THROTTLER_PPT3_BIT 17
+#define THROTTLER_FIT_BIT 18
+#define THROTTLER_GFX_APCC_PLUS_BIT 19
+#define THROTTLER_GFX_DVO_BIT 20
+#define THROTTLER_COUNT 21
+
+// FW DState Features Control Bits
+#define FW_DSTATE_SOC_ULV_BIT 0
+#define FW_DSTATE_G6_HSR_BIT 1
+#define FW_DSTATE_G6_PHY_VMEMP_OFF_BIT 2
+#define FW_DSTATE_SMN_DS_BIT 3
+#define FW_DSTATE_MP1_WHISPER_MODE_BIT 4
+#define FW_DSTATE_SOC_LIV_MIN_BIT 5
+#define FW_DSTATE_SOC_PLL_PWRDN_BIT 6
+#define FW_DSTATE_MEM_PLL_PWRDN_BIT 7
+#define FW_DSTATE_MALL_ALLOC_BIT 8
+#define FW_DSTATE_MEM_PSI_BIT 9
+#define FW_DSTATE_HSR_NON_STROBE_BIT 10
+#define FW_DSTATE_MP0_ENTER_WFI_BIT 11
+#define FW_DSTATE_MALL_FLUSH_BIT 12
+#define FW_DSTATE_SOC_PSI_BIT 13
+#define FW_DSTATE_MMHUB_INTERLOCK_BIT 14
+#define FW_DSTATE_D0i3_2_QUIET_FW_BIT 15
+#define FW_DSTATE_CLDO_PRG_BIT 16
+#define FW_DSTATE_DF_PLL_PWRDN_BIT 17
+
+//LED Display Mask & Control Bits
+#define LED_DISPLAY_GFX_DPM_BIT 0
+#define LED_DISPLAY_PCIE_BIT 1
+#define LED_DISPLAY_ERROR_BIT 2
+
+
+#define MEM_TEMP_READ_OUT_OF_BAND_BIT 0
+#define MEM_TEMP_READ_IN_BAND_REFRESH_BIT 1
+#define MEM_TEMP_READ_IN_BAND_DUMMY_PSTATE_BIT 2
+
+typedef enum {
+ SMARTSHIFT_VERSION_1,
+ SMARTSHIFT_VERSION_2,
+ SMARTSHIFT_VERSION_3,
+} SMARTSHIFT_VERSION_e;
+
+typedef enum {
+ FOPT_CALC_AC_CALC_DC,
+ FOPT_PPTABLE_AC_CALC_DC,
+ FOPT_CALC_AC_PPTABLE_DC,
+ FOPT_PPTABLE_AC_PPTABLE_DC,
+} FOPT_CALC_e;
+
+typedef enum {
+ DRAM_BIT_WIDTH_DISABLED = 0,
+ DRAM_BIT_WIDTH_X_8 = 8,
+ DRAM_BIT_WIDTH_X_16 = 16,
+ DRAM_BIT_WIDTH_X_32 = 32,
+ DRAM_BIT_WIDTH_X_64 = 64,
+ DRAM_BIT_WIDTH_X_128 = 128,
+ DRAM_BIT_WIDTH_COUNT,
+} DRAM_BIT_WIDTH_TYPE_e;
+
+//I2C Interface
+#define NUM_I2C_CONTROLLERS 8
+
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+#define MAX_SW_I2C_COMMANDS 24
+
+typedef enum {
+ I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0
+ I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1
+ I2C_CONTROLLER_PORT_COUNT,
+} I2cControllerPort_e;
+
+typedef enum {
+ I2C_CONTROLLER_NAME_VR_GFX = 0,
+ I2C_CONTROLLER_NAME_VR_SOC,
+ I2C_CONTROLLER_NAME_VR_VMEMP,
+ I2C_CONTROLLER_NAME_VR_VDDIO,
+ I2C_CONTROLLER_NAME_LIQUID0,
+ I2C_CONTROLLER_NAME_LIQUID1,
+ I2C_CONTROLLER_NAME_PLX,
+ I2C_CONTROLLER_NAME_FAN_INTAKE,
+ I2C_CONTROLLER_NAME_COUNT,
+} I2cControllerName_e;
+
+typedef enum {
+ I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0,
+ I2C_CONTROLLER_THROTTLER_VR_GFX,
+ I2C_CONTROLLER_THROTTLER_VR_SOC,
+ I2C_CONTROLLER_THROTTLER_VR_VMEMP,
+ I2C_CONTROLLER_THROTTLER_VR_VDDIO,
+ I2C_CONTROLLER_THROTTLER_LIQUID0,
+ I2C_CONTROLLER_THROTTLER_LIQUID1,
+ I2C_CONTROLLER_THROTTLER_PLX,
+ I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
+ I2C_CONTROLLER_THROTTLER_INA3221,
+ I2C_CONTROLLER_THROTTLER_COUNT,
+} I2cControllerThrottler_e;
+
+typedef enum {
+ I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
+ I2C_CONTROLLER_PROTOCOL_VR_IR35217,
+ I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
+ I2C_CONTROLLER_PROTOCOL_INA3221,
+ I2C_CONTROLLER_PROTOCOL_TMP_MAX6604,
+ I2C_CONTROLLER_PROTOCOL_COUNT,
+} I2cControllerProtocol_e;
+
+typedef struct {
+ uint8_t Enabled;
+ uint8_t Speed;
+ uint8_t SlaveAddress;
+ uint8_t ControllerPort;
+ uint8_t ControllerName;
+ uint8_t ThermalThrotter;
+ uint8_t I2cProtocol;
+ uint8_t PaddingConfig;
+} I2cControllerConfig_t;
+
+typedef enum {
+ I2C_PORT_SVD_SCL = 0,
+ I2C_PORT_GPIO,
+} I2cPort_e;
+
+typedef enum {
+ I2C_SPEED_FAST_50K = 0, //50 Kbits/s
+ I2C_SPEED_FAST_100K, //100 Kbits/s
+ I2C_SPEED_FAST_400K, //400 Kbits/s
+ I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode)
+ I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode)
+ I2C_SPEED_HIGH_2M, //2.3 Mbits/s
+ I2C_SPEED_COUNT,
+} I2cSpeed_e;
+
+typedef enum {
+ I2C_CMD_READ = 0,
+ I2C_CMD_WRITE,
+ I2C_CMD_COUNT,
+} I2cCmdType_e;
+
+#define CMDCONFIG_STOP_BIT 0
+#define CMDCONFIG_RESTART_BIT 1
+#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write
+
+#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
+#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
+#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
+
+typedef struct {
+ uint8_t ReadWriteData; //Return data for read. Data to send for write
+ uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write
+} SwI2cCmd_t; //SW I2C Command Table
+
+typedef struct {
+ uint8_t I2CcontrollerPort; //CKSVII2C0(0) or CKSVII2C1(1)
+ uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select
+ uint8_t SlaveAddress; //Slave address of device
+ uint8_t NumCmds; //Number of commands
+
+ SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
+} SwI2cRequest_t; // SW I2C Request Table
+
+typedef struct {
+ SwI2cRequest_t SwI2cRequest;
+
+ uint32_t Spare[8];
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SwI2cRequestExternal_t;
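Putting the pieces above together: a software I2C transaction is a fixed-size list of per-byte SwI2cCmd_t entries, with CmdConfig bits marking direction and stop/restart framing. A minimal sketch of filling in an n-byte read (slave-address formatting is left to the caller):

/* Sketch: builds a read request; CMDCONFIG_READWRITE bit stays 0 for reads. */
static void build_sw_i2c_read(SwI2cRequest_t *req, uint8_t slave_addr,
			      uint8_t nbytes)
{
	uint8_t i;

	memset(req, 0, sizeof(*req));
	req->I2CcontrollerPort = I2C_CONTROLLER_PORT_0;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = slave_addr;
	req->NumCmds = nbytes;

	for (i = 0; i < nbytes && i < MAX_SW_I2C_COMMANDS; i++) {
		if (i == nbytes - 1) /* terminate the transaction */
			req->SwI2cCmds[i].CmdConfig |= CMDCONFIG_STOP_MASK;
	}
}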
+
+typedef struct {
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+
+ uint32_t eccPadding;
+} EccInfo_t;
+
+typedef struct {
+ EccInfo_t EccInfo[24];
+} EccInfoTable_t;
+
+//D3HOT sequences
+typedef enum {
+ BACO_SEQUENCE,
+ MSR_SEQUENCE,
+ BAMACO_SEQUENCE,
+ ULPS_SEQUENCE,
+ D3HOT_SEQUENCE_COUNT,
+} D3HOTSequence_e;
+
+//This is aligned with RSMU PGFSM Register Mapping
+typedef enum {
+ PG_DYNAMIC_MODE = 0,
+ PG_STATIC_MODE,
+} PowerGatingMode_e;
+
+//This is aligned with RSMU PGFSM Register Mapping
+typedef enum {
+ PG_POWER_DOWN = 0,
+ PG_POWER_UP,
+} PowerGatingSettings_e;
+
+typedef struct {
+ uint32_t a; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+ uint32_t c; // store in IEEE float format in this variable
+} QuadraticInt_t;
+
+typedef struct {
+ uint32_t m; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+} LinearInt_t;
+
+typedef struct {
+ uint32_t a; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+ uint32_t c; // store in IEEE float format in this variable
+} DroopInt_t;
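QuadraticInt_t, LinearInt_t and DroopInt_t all carry IEEE-754 single-precision values inside uint32_t fields so the shared tables stay free of float members. A sketch of the usual round-trip; memcpy is used so the reinterpretation does not run afoul of strict aliasing:

static inline float u32_to_f32(uint32_t v)
{
	float f;

	memcpy(&f, &v, sizeof(f)); /* reinterpret the stored bit pattern */
	return f;
}

static inline uint32_t f32_to_u32(float f)
{
	uint32_t v;

	memcpy(&v, &f, sizeof(v));
	return v;
}

/* y = a*x^2 + b*x + c, with coefficients stored as raw IEEE bits. */
static inline float eval_quadratic(const QuadraticInt_t *q, float x)
{
	return u32_to_f32(q->a) * x * x + u32_to_f32(q->b) * x + u32_to_f32(q->c);
}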
+
+typedef enum {
+ DCS_ARCH_DISABLED,
+ DCS_ARCH_FADCS,
+ DCS_ARCH_ASYNC,
+} DCS_ARCH_e;
+
+//Only Clks that have DPM descriptors are listed here
+typedef enum {
+ PPCLK_GFXCLK = 0,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_FCLK,
+ PPCLK_DCLK_0,
+ PPCLK_VCLK_0,
+ PPCLK_DISPCLK,
+ PPCLK_DPPCLK,
+ PPCLK_DPREFCLK,
+ PPCLK_DCFCLK,
+ PPCLK_DTBCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef enum {
+ VOLTAGE_MODE_PPTABLE = 0,
+ VOLTAGE_MODE_FUSES,
+ VOLTAGE_MODE_COUNT,
+} VOLTAGE_MODE_e;
+
+typedef enum {
+ AVFS_VOLTAGE_GFX = 0,
+ AVFS_VOLTAGE_SOC,
+ AVFS_VOLTAGE_COUNT,
+} AVFS_VOLTAGE_TYPE_e;
+
+typedef enum {
+ AVFS_TEMP_COLD = 0,
+ AVFS_TEMP_HOT,
+ AVFS_TEMP_COUNT,
+} AVFS_TEMP_e;
+
+typedef enum {
+ AVFS_D_G,
+ AVFS_D_COUNT,
+} AVFS_D_e;
+
+
+typedef enum {
+ UCLK_DIV_BY_1 = 0,
+ UCLK_DIV_BY_2,
+ UCLK_DIV_BY_4,
+ UCLK_DIV_BY_8,
+} UCLK_DIV_e;
+
+typedef enum {
+ GPIO_INT_POLARITY_ACTIVE_LOW = 0,
+ GPIO_INT_POLARITY_ACTIVE_HIGH,
+} GpioIntPolarity_e;
+
+typedef enum {
+ PWR_CONFIG_TDP = 0,
+ PWR_CONFIG_TGP,
+ PWR_CONFIG_TCP_ESTIMATED,
+ PWR_CONFIG_TCP_MEASURED,
+ PWR_CONFIG_TBP_DESKTOP,
+ PWR_CONFIG_TBP_MOBILE,
+} PwrConfig_e;
+
+typedef struct {
+ uint8_t Padding;
+ uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM
+ uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used
+ uint8_t CalculateFopt; // Indication whether FW should calculate Fopt or use values below. Reference FOPT_CALC_e
+ LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz)
+ uint32_t Padding3[3];
+ uint16_t Padding4;
+ uint16_t FoptimalDc; //Foptimal frequency in DC power mode.
+ uint16_t FoptimalAc; //Foptimal frequency in AC power mode.
+ uint16_t Padding2;
+} DpmDescriptor_t;
+
+typedef enum {
+ PPT_THROTTLER_PPT0,
+ PPT_THROTTLER_PPT1,
+ PPT_THROTTLER_PPT2,
+ PPT_THROTTLER_PPT3,
+ PPT_THROTTLER_COUNT
+} PPT_THROTTLER_e;
+
+typedef enum {
+ TEMP_EDGE,
+ TEMP_HOTSPOT,
+ TEMP_HOTSPOT_GFX,
+ TEMP_HOTSPOT_SOC,
+ TEMP_MEM,
+ TEMP_VR_GFX,
+ TEMP_VR_SOC,
+ TEMP_VR_MEM0,
+ TEMP_VR_MEM1,
+ TEMP_LIQUID0,
+ TEMP_LIQUID1,
+ TEMP_PLX,
+ TEMP_COUNT,
+} TEMP_e;
+
+typedef enum {
+ TDC_THROTTLER_GFX,
+ TDC_THROTTLER_SOC,
+ TDC_THROTTLER_COUNT
+} TDC_THROTTLER_e;
+
+typedef enum {
+ SVI_PLANE_VDD_GFX,
+ SVI_PLANE_VDD_SOC,
+ SVI_PLANE_VDDCI_MEM,
+ SVI_PLANE_VDDIO_MEM,
+ SVI_PLANE_COUNT,
+} SVI_PLANE_e;
+
+typedef enum {
+ PMFW_VOLT_PLANE_GFX,
+ PMFW_VOLT_PLANE_SOC,
+ PMFW_VOLT_PLANE_COUNT
+} PMFW_VOLT_PLANE_e;
+
+typedef enum {
+ CUSTOMER_VARIANT_ROW,
+ CUSTOMER_VARIANT_FALCON,
+ CUSTOMER_VARIANT_COUNT,
+} CUSTOMER_VARIANT_e;
+
+typedef enum {
+ POWER_SOURCE_AC,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_COUNT,
+} POWER_SOURCE_e;
+
+typedef enum {
+ MEM_VENDOR_PLACEHOLDER0, // 0
+ MEM_VENDOR_SAMSUNG, // 1
+ MEM_VENDOR_INFINEON, // 2
+ MEM_VENDOR_ELPIDA, // 3
+ MEM_VENDOR_ETRON, // 4
+ MEM_VENDOR_NANYA, // 5
+ MEM_VENDOR_HYNIX, // 6
+ MEM_VENDOR_MOSEL, // 7
+ MEM_VENDOR_WINBOND, // 8
+ MEM_VENDOR_ESMT, // 9
+ MEM_VENDOR_PLACEHOLDER1, // 10
+ MEM_VENDOR_PLACEHOLDER2, // 11
+ MEM_VENDOR_PLACEHOLDER3, // 12
+ MEM_VENDOR_PLACEHOLDER4, // 13
+ MEM_VENDOR_PLACEHOLDER5, // 14
+ MEM_VENDOR_MICRON, // 15
+ MEM_VENDOR_COUNT,
+} MEM_VENDOR_e;
+
+typedef enum {
+ PP_GRTAVFS_HW_CPO_CTL_ZONE0,
+ PP_GRTAVFS_HW_CPO_CTL_ZONE1,
+ PP_GRTAVFS_HW_CPO_CTL_ZONE2,
+ PP_GRTAVFS_HW_CPO_CTL_ZONE3,
+ PP_GRTAVFS_HW_CPO_CTL_ZONE4,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE0,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE0,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE1,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE1,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE2,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE2,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE3,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE3,
+ PP_GRTAVFS_HW_CPO_EN_0_31_ZONE4,
+ PP_GRTAVFS_HW_CPO_EN_32_63_ZONE4,
+ PP_GRTAVFS_HW_ZONE0_VF,
+ PP_GRTAVFS_HW_ZONE1_VF1,
+ PP_GRTAVFS_HW_ZONE2_VF2,
+ PP_GRTAVFS_HW_ZONE3_VF3,
+ PP_GRTAVFS_HW_VOLTAGE_GB,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE0,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE1,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE2,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE3,
+ PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE4,
+ PP_GRTAVFS_HW_RESERVED_0,
+ PP_GRTAVFS_HW_RESERVED_1,
+ PP_GRTAVFS_HW_RESERVED_2,
+ PP_GRTAVFS_HW_RESERVED_3,
+ PP_GRTAVFS_HW_RESERVED_4,
+ PP_GRTAVFS_HW_RESERVED_5,
+ PP_GRTAVFS_HW_RESERVED_6,
+ PP_GRTAVFS_HW_FUSE_COUNT,
+} PP_GRTAVFS_HW_FUSE_e;
+
+typedef enum {
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_HOT_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_COLD_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_HOT_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_COLD_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_HOT_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_COLD_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_HOT_T0,
+ PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_COLD_T0,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z0,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z1,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z2,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z3,
+ PP_GRTAVFS_FW_COMMON_SRAM_RM_Z4,
+ PP_GRTAVFS_FW_COMMON_FUSE_COUNT,
+} PP_GRTAVFS_FW_COMMON_FUSE_e;
+
+typedef enum {
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_NEG_1,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_0,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_1,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_2,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_3,
+ PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_4,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_NEG_1,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_0,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_1,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_2,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_3,
+ PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_4,
+ PP_GRTAVFS_FW_SEP_FUSE_VF_NEG_1_FREQUENCY,
+ PP_GRTAVFS_FW_SEP_FUSE_VF4_FREQUENCY,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_0,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_1,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_2,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_3,
+ PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_4,
+ PP_GRTAVFS_FW_SEP_FUSE_COUNT,
+} PP_GRTAVFS_FW_SEP_FUSE_e;
+
+#define PP_NUM_RTAVFS_PWL_ZONES 5
+
+
+// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
+// Slope Q1.7, Offset Q1.2
+typedef struct {
+ int8_t Offset; // in Amps
+ uint8_t Padding;
+ uint16_t MaxCurrent; // in Amps
+} SviTelemetryScale_t;
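The Qm.n annotations used here (Slope Q1.7, Offset Q1.2, and the mV(Q2) voltages that recur throughout this header) are fixed-point encodings: n fractional bits, so the real value is the raw integer divided by 2^n. A small sketch of the conversions, assuming those comments describe the wire format exactly; float is used for clarity, while driver code would stay in fixed point:

/* mV(Q2): two fractional bits, i.e. the raw value is millivolts * 4. */
static inline float q2_to_mv(uint16_t raw)
{
	return raw / 4.0f;
}

/* Q1.7 slope: seven fractional bits. */
static inline float q1_7_to_f(uint8_t raw)
{
	return raw / 128.0f;
}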
+
+#define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1
+
+#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0
+#define PP_OD_FEATURE_GFX_VMAX_BIT 1
+#define PP_OD_FEATURE_SOC_VMAX_BIT 2
+#define PP_OD_FEATURE_PPT_BIT 3
+#define PP_OD_FEATURE_FAN_CURVE_BIT 4
+#define PP_OD_FEATURE_FAN_LEGACY_BIT 5
+#define PP_OD_FEATURE_FULL_CTRL_BIT 6
+#define PP_OD_FEATURE_TDC_BIT 7
+#define PP_OD_FEATURE_GFXCLK_BIT 8
+#define PP_OD_FEATURE_UCLK_BIT 9
+#define PP_OD_FEATURE_FCLK_BIT 10
+#define PP_OD_FEATURE_ZERO_FAN_BIT 11
+#define PP_OD_FEATURE_TEMPERATURE_BIT 12
+#define PP_OD_FEATURE_EDC_BIT 13
+#define PP_OD_FEATURE_COUNT 14
+
+typedef enum {
+ PP_OD_POWER_FEATURE_ALWAYS_ENABLED,
+ PP_OD_POWER_FEATURE_DISABLED_WHILE_GAMING,
+ PP_OD_POWER_FEATURE_ALWAYS_DISABLED,
+} PP_OD_POWER_FEATURE_e;
+
+typedef enum {
+ FAN_MODE_AUTO = 0,
+ FAN_MODE_MANUAL_LINEAR,
+} FanMode_e;
+
+typedef enum {
+ OD_NO_ERROR,
+ OD_REQUEST_ADVANCED_NOT_SUPPORTED,
+ OD_UNSUPPORTED_FEATURE,
+ OD_INVALID_FEATURE_COMBO_ERROR,
+ OD_GFXCLK_VF_CURVE_OFFSET_ERROR,
+ OD_VDD_GFX_VMAX_ERROR,
+ OD_VDD_SOC_VMAX_ERROR,
+ OD_PPT_ERROR,
+ OD_FAN_MIN_PWM_ERROR,
+ OD_FAN_ACOUSTIC_TARGET_ERROR,
+ OD_FAN_ACOUSTIC_LIMIT_ERROR,
+ OD_FAN_TARGET_TEMP_ERROR,
+ OD_FAN_ZERO_RPM_STOP_TEMP_ERROR,
+ OD_FAN_CURVE_PWM_ERROR,
+ OD_FAN_CURVE_TEMP_ERROR,
+ OD_FULL_CTRL_GFXCLK_ERROR,
+ OD_FULL_CTRL_UCLK_ERROR,
+ OD_FULL_CTRL_FCLK_ERROR,
+ OD_FULL_CTRL_VDD_GFX_ERROR,
+ OD_FULL_CTRL_VDD_SOC_ERROR,
+ OD_TDC_ERROR,
+ OD_GFXCLK_ERROR,
+ OD_UCLK_ERROR,
+ OD_FCLK_ERROR,
+ OD_OP_TEMP_ERROR,
+ OD_OP_GFX_EDC_ERROR,
+ OD_OP_GFX_PCC_ERROR,
+ OD_POWER_FEATURE_CTRL_ERROR,
+} OD_FAIL_e;
+
+typedef struct {
+ uint32_t FeatureCtrlMask;
+
+ //Voltage control
+ int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
+
+ uint16_t VddGfxVmax; // in mV
+ uint16_t VddSocVmax;
+
+ uint8_t IdlePwrSavingFeaturesCtrl;
+ uint8_t RuntimePwrSavingFeaturesCtrl;
+ uint16_t Padding;
+
+ //Frequency changes
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
+ uint16_t UclkFmin; // MHz
+ uint16_t UclkFmax; // MHz
+ uint16_t FclkFmin;
+ uint16_t FclkFmax;
+
+ //PPT
+ int16_t Ppt; // %
+ int16_t Tdc;
+
+ //Fan control
+ uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
+ uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
+ uint16_t FanMinimumPwm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
+ uint16_t FanTargetTemperature; // Degrees Celsius
+ uint8_t FanZeroRpmEnable;
+ uint8_t FanZeroRpmStopTemp;
+ uint8_t FanMode;
+ uint8_t MaxOpTemp;
+
+ uint8_t AdvancedOdModeEnabled;
+ uint8_t Padding1[3];
+
+ uint16_t GfxVoltageFullCtrlMode;
+ uint16_t SocVoltageFullCtrlMode;
+ uint16_t GfxclkFullCtrlMode;
+ uint16_t UclkFullCtrlMode;
+ uint16_t FclkFullCtrlMode;
+ uint16_t Padding2;
+
+ int16_t GfxEdc;
+ int16_t GfxPccLimitControl;
+
+ uint32_t Spare[10];
+ uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
+} OverDriveTable_t;
+
+typedef struct {
+ OverDriveTable_t OverDriveTable;
+
+} OverDriveTableExternal_t;
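FeatureCtrlMask selects which OverDriveTable_t sections the firmware should honor, using the PP_OD_FEATURE_*_BIT indices defined above. A sketch of enabling just a custom fan curve, assuming unset sections are left at firmware defaults:

static void od_enable_fan_curve(OverDriveTable_t *od,
				const uint8_t pwm[NUM_OD_FAN_MAX_POINTS],
				const uint8_t temp[NUM_OD_FAN_MAX_POINTS])
{
	int i;

	od->FeatureCtrlMask |= 1u << PP_OD_FEATURE_FAN_CURVE_BIT;
	for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
		od->FanLinearPwmPoints[i] = pwm[i];
		od->FanLinearTempPoints[i] = temp[i];
	}
	od->FanMode = FAN_MODE_MANUAL_LINEAR;
}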
+
+typedef struct {
+ uint32_t FeatureCtrlMask;
+
+ //Gfx Vf Curve
+ int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
+ //gfx Vmax
+ uint16_t VddGfxVmax; // in mV
+ //soc Vmax
+ uint16_t VddSocVmax;
+
+ //gfxclk
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
+ //uclk
+ uint16_t UclkFmin; // MHz
+ uint16_t UclkFmax; // MHz
+ //fclk
+ uint16_t FclkFmin;
+ uint16_t FclkFmax;
+
+ //PPT
+ int16_t Ppt; // %
+ //TDC
+ int16_t Tdc;
+
+ //Fan Curve
+ uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
+ uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
+ //Fan Legacy
+ uint16_t FanMinimumPwm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
+ uint16_t FanTargetTemperature; // Degrees Celsius
+ //zero fan
+ uint8_t FanZeroRpmEnable;
+ //temperature
+ uint8_t MaxOpTemp;
+ uint8_t Padding[2];
+
+ //Full Ctrl
+ uint16_t GfxVoltageFullCtrlMode;
+ uint16_t SocVoltageFullCtrlMode;
+ uint16_t GfxclkFullCtrlMode;
+ uint16_t UclkFullCtrlMode;
+ uint16_t FclkFullCtrlMode;
+ //EDC
+ int16_t GfxEdc;
+ int16_t GfxPccLimitControl;
+ int16_t Padding1;
+
+ uint32_t Spare[5];
+} OverDriveLimits_t;
+
+typedef enum {
+ BOARD_GPIO_SMUIO_0,
+ BOARD_GPIO_SMUIO_1,
+ BOARD_GPIO_SMUIO_2,
+ BOARD_GPIO_SMUIO_3,
+ BOARD_GPIO_SMUIO_4,
+ BOARD_GPIO_SMUIO_5,
+ BOARD_GPIO_SMUIO_6,
+ BOARD_GPIO_SMUIO_7,
+ BOARD_GPIO_SMUIO_8,
+ BOARD_GPIO_SMUIO_9,
+ BOARD_GPIO_SMUIO_10,
+ BOARD_GPIO_SMUIO_11,
+ BOARD_GPIO_SMUIO_12,
+ BOARD_GPIO_SMUIO_13,
+ BOARD_GPIO_SMUIO_14,
+ BOARD_GPIO_SMUIO_15,
+ BOARD_GPIO_SMUIO_16,
+ BOARD_GPIO_SMUIO_17,
+ BOARD_GPIO_SMUIO_18,
+ BOARD_GPIO_SMUIO_19,
+ BOARD_GPIO_SMUIO_20,
+ BOARD_GPIO_SMUIO_21,
+ BOARD_GPIO_SMUIO_22,
+ BOARD_GPIO_SMUIO_23,
+ BOARD_GPIO_SMUIO_24,
+ BOARD_GPIO_SMUIO_25,
+ BOARD_GPIO_SMUIO_26,
+ BOARD_GPIO_SMUIO_27,
+ BOARD_GPIO_SMUIO_28,
+ BOARD_GPIO_SMUIO_29,
+ BOARD_GPIO_SMUIO_30,
+ BOARD_GPIO_SMUIO_31,
+ MAX_BOARD_GPIO_SMUIO_NUM,
+ BOARD_GPIO_DC_GEN_A,
+ BOARD_GPIO_DC_GEN_B,
+ BOARD_GPIO_DC_GEN_C,
+ BOARD_GPIO_DC_GEN_D,
+ BOARD_GPIO_DC_GEN_E,
+ BOARD_GPIO_DC_GEN_F,
+ BOARD_GPIO_DC_GEN_G,
+ BOARD_GPIO_DC_GENLK_CLK,
+ BOARD_GPIO_DC_GENLK_VSYNC,
+ BOARD_GPIO_DC_SWAPLOCK_A,
+ BOARD_GPIO_DC_SWAPLOCK_B,
+ MAX_BOARD_DC_GPIO_NUM,
+ BOARD_GPIO_LV_EN,
+} BOARD_GPIO_TYPE_e;
+
+#define INVALID_BOARD_GPIO 0xFF
+
+
+typedef struct {
+ //PLL 0
+ uint16_t InitImuClk;
+ uint16_t InitSocclk;
+ uint16_t InitMpioclk;
+ uint16_t InitSmnclk;
+ //PLL 1
+ uint16_t InitDispClk;
+ uint16_t InitDppClk;
+ uint16_t InitDprefclk;
+ uint16_t InitDcfclk;
+ uint16_t InitDtbclk;
+ uint16_t InitDbguSocClk;
+ //PLL 2
+ uint16_t InitGfxclk_bypass;
+ uint16_t InitMp1clk;
+ uint16_t InitLclk;
+ uint16_t InitDbguBacoClk;
+ uint16_t InitBaco400clk;
+ uint16_t InitBaco1200clk_bypass;
+ uint16_t InitBaco700clk_bypass;
+ uint16_t InitBaco500clk;
+ // PLL 3
+ uint16_t InitDclk0;
+ uint16_t InitVclk0;
+ // PLL 4
+ uint16_t InitFclk;
+ uint16_t Padding1;
+ // PLL 5
+ //UCLK clocks, assumed all UCLK instances will be the same.
+ uint8_t InitUclkLevel; // =0,1,2,3,4,5 frequency from FreqTableUclk
+
+ uint8_t Padding[3];
+
+ uint32_t InitVcoFreqPll0; //smu_socclk_t
+ uint32_t InitVcoFreqPll1; //smu_displayclk_t
+ uint32_t InitVcoFreqPll2; //smu_nbioclk_t
+ uint32_t InitVcoFreqPll3; //smu_vcnclk_t
+ uint32_t InitVcoFreqPll4; //smu_fclk_t
+ uint32_t InitVcoFreqPll5; //smu_uclk_01_t
+ uint32_t InitVcoFreqPll6; //smu_uclk_23_t
+ uint32_t InitVcoFreqPll7; //smu_uclk_45_t
+ uint32_t InitVcoFreqPll8; //smu_uclk_67_t
+
+ //encoding will be SVI3
+ uint16_t InitGfx; // In mV(Q2), should be 0?
+ uint16_t InitSoc; // In mV(Q2)
+ uint16_t InitVddIoMem; // In mV(Q2) MemVdd
+ uint16_t InitVddCiMem; // In mV(Q2) VMemP
+
+ //uint16_t Padding2;
+
+ uint32_t Spare[8];
+} BootValues_t;
+
+typedef struct {
+ uint16_t Power[PPT_THROTTLER_COUNT][POWER_SOURCE_COUNT]; // Watts
+ uint16_t Tdc[TDC_THROTTLER_COUNT]; // Amps
+
+ uint16_t Temperature[TEMP_COUNT]; // Celsius
+
+ uint8_t PwmLimitMin;
+ uint8_t PwmLimitMax;
+ uint8_t FanTargetTemperature;
+ uint8_t Spare1[1];
+
+ uint16_t AcousticTargetRpmThresholdMin;
+ uint16_t AcousticTargetRpmThresholdMax;
+
+ uint16_t AcousticLimitRpmThresholdMin;
+ uint16_t AcousticLimitRpmThresholdMax;
+
+ uint16_t PccLimitMin;
+ uint16_t PccLimitMax;
+
+ uint16_t FanStopTempMin;
+ uint16_t FanStopTempMax;
+ uint16_t FanStartTempMin;
+ uint16_t FanStartTempMax;
+
+ uint16_t PowerMinPpt0[POWER_SOURCE_COUNT];
+ uint32_t Spare[11];
+} MsgLimits_t;
+
+typedef struct {
+ uint16_t BaseClockAc;
+ uint16_t GameClockAc;
+ uint16_t BoostClockAc;
+ uint16_t BaseClockDc;
+ uint16_t GameClockDc;
+ uint16_t BoostClockDc;
+
+ uint32_t Reserved[4];
+} DriverReportedClocks_t;
+
+typedef struct {
+ uint8_t DcBtcEnabled;
+ uint8_t Padding[3];
+
+ uint16_t DcTol; // mV Q2
+ uint16_t DcBtcGb; // mV Q2
+
+ uint16_t DcBtcMin; // mV Q2
+ uint16_t DcBtcMax; // mV Q2
+
+ LinearInt_t DcBtcGbScalar;
+} AvfsDcBtcParams_t;
+
+typedef struct {
+ uint16_t AvfsTemp[AVFS_TEMP_COUNT]; //in degrees C
+ uint16_t VftFMin; // in MHz
+ uint16_t VInversion; // in mV Q2
+ QuadraticInt_t qVft[AVFS_TEMP_COUNT];
+ QuadraticInt_t qAvfsGb;
+ QuadraticInt_t qAvfsGb2;
+} AvfsFuseOverride_t;
+
+//all settings maintained by PFE team
+typedef struct {
+ uint8_t Version;
+ uint8_t Spare8[3];
+ // SECTION: Feature Control
+ uint32_t FeaturesToRun[NUM_FEATURES / 32]; // Features that PMFW will attempt to enable. Use FEATURE_*_BIT as mapping
+ // SECTION: FW DSTATE Settings
+ uint32_t FwDStateMask; // See FW_DSTATE_*_BIT for mapping
+ // SECTION: Advanced Options
+ uint32_t DebugOverrides;
+
+ uint32_t Spare[2];
+} PFE_Settings_t;
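FeaturesToRun packs the 64 FEATURE_*_BIT flags into two 32-bit words, so testing a feature means indexing by bit/32 and masking by bit%32. A sketch:

static inline bool pfe_feature_requested(const PFE_Settings_t *pfe,
					 unsigned int bit)
{
	if (bit >= NUM_FEATURES)
		return false;

	return pfe->FeaturesToRun[bit / 32] & (1u << (bit % 32));
}

/* e.g. pfe_feature_requested(pfe, FEATURE_BACO_BIT) */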
+
+typedef struct {
+ // SECTION: Version
+ uint32_t Version; // should be unique to each SKU(i.e if any value changes in below structure then this value must be different)
+
+ // SECTION: Miscellaneous Configuration
+ uint8_t TotalPowerConfig; // Determines how PMFW calculates the power. Use defines from PwrConfig_e
+ uint8_t CustomerVariant; //To specify if this PPTable is intended for a particular customer. Use defines from CUSTOMER_VARIANT_e
+ uint8_t MemoryTemperatureTypeMask; // Bit mapping indicating which methods of memory temperature reading are enabled. Use defines from MEM_TEMP_*BIT
+ uint8_t SmartShiftVersion; // Determine what SmartShift feature version is supported Use defines from SMARTSHIFT_VERSION_e
+
+ // SECTION: Infrastructure Limits
+ uint8_t SocketPowerLimitSpare[10];
+
+ //If set to 1, SocketPowerLimitAc and SocketPowerLimitDc will be interpreted as legacy programs (i.e. absolute power). If 0, all except index 0 will be scalars
+ //relative to index 0
+ uint8_t EnableLegacyPptLimit;
+ uint8_t UseInputTelemetry; //applicable to SVI3 only and only to be set if VRs support
+
+ uint8_t SmartShiftMinReportedPptinDcs; //minimum possible active power consumption for this SKU. Used for SmartShift power reporting
+
+ uint8_t PaddingPpt[7];
+
+ uint16_t HwCtfTempLimit; // In degrees Celsius. Temperature above which HW will trigger CTF. Consumed by VBIOS only
+
+ uint16_t PaddingInfra;
+
+ // Per year normalized Vmax state failure rates (sum of the two domains divided by life time in years)
+ uint32_t FitControllerFailureRateLimit; //in IEEE float
+ //Expected GFX Duty Cycle at Vmax.
+ uint32_t FitControllerGfxDutyCycle; // in IEEE float
+ //Expected SOC Duty Cycle at Vmax.
+ uint32_t FitControllerSocDutyCycle; // in IEEE float
+
+ //This offset will be deducted from the controller output before it goes through the SOC Vset limiter block.
+ uint32_t FitControllerSocOffset; //in IEEE float
+
+ uint32_t GfxApccPlusResidencyLimit; // Percentage value. Used by APCC+ controller to control PCC residency to some value
+
+ // SECTION: Throttler settings
+ uint32_t ThrottlerControlMask; // See THROTTLER_*_BIT for mapping
+
+
+ // SECTION: Voltage Control Parameters
+ uint16_t UlvVoltageOffset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). ULV offset used in either GFX_ULV or SOC_ULV(part of FW_DSTATE)
+
+ uint8_t Padding[2];
+ uint16_t DeepUlvVoltageOffsetSoc; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC as part of FW_DSTATE
+
+ // Voltage Limits
+ uint16_t DefaultMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage without FIT controller enabled
+ uint16_t BoostMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage with FIT controller enabled
+
+ //Vmin Optimizations
+ int16_t VminTempHystersis[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature hysteresis for switching between low/high temperature values for Vmin
+ int16_t VminTempThreshold[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature threshold for switching between low/high temperature values for Vmin
+ uint16_t Vmin_Hot_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at hot.
+ uint16_t Vmin_Cold_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at cold.
+ uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot.
+ uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold.
+ uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin
+ uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot
+ uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold
+
+ //This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for.
+ uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT];
+ //Linear offset or GB term to account for mis-correlation between PSM and Vmin shift trends across parts.
+ uint16_t VcBtcVmin2PsmDegrationGb[PMFW_VOLT_PLANE_COUNT];
+ //Scalar coefficient of the PSM aging degradation function
+ uint32_t VcBtcPsmA[PMFW_VOLT_PLANE_COUNT]; // A_PSM
+ //Exponential coefficient of the PSM aging degradation function
+ uint32_t VcBtcPsmB[PMFW_VOLT_PLANE_COUNT]; // B_PSM
+ //Scalar coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
+ uint32_t VcBtcVminA[PMFW_VOLT_PLANE_COUNT]; // A_VMIN
+ //Exponential coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
+ uint32_t VcBtcVminB[PMFW_VOLT_PLANE_COUNT]; // B_VMIN
+
+ uint8_t PerPartVminEnabled[PMFW_VOLT_PLANE_COUNT];
+ uint8_t VcBtcEnabled[PMFW_VOLT_PLANE_COUNT];
+
+ uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
+ uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
+
+ QuadraticInt_t Gfx_Vmin_droop;
+ QuadraticInt_t Soc_Vmin_droop;
+ uint32_t SpareVmin[6];
+
+ //SECTION: DPM Configuration 1
+ DpmDescriptor_t DpmDescriptor[PPCLK_COUNT];
+
+ uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableShadowUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
+ uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
+
+ uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
+
+ uint16_t GfxclkAibFmax;
+ uint16_t GfxclkFreqCap;
+
+ //GFX Idle Power Settings
+ uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in MHz
+ uint16_t GfxclkFgfxoffExitImu; // Exit/Entry in IMU stage (BYPASS), in MHz
+ uint16_t GfxclkFgfxoffExitRlc; // Exit in RLC stage (PLL), in MHz
+ uint16_t GfxclkThrottleClock; //Used primarily in DCS
+ uint8_t EnableGfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages
+ uint8_t GfxIdlePadding;
+
+ uint8_t SmsRepairWRCKClkDivEn;
+ uint8_t SmsRepairWRCKClkDivVal;
+ uint8_t GfxOffEntryEarlyMGCGEn;
+ uint8_t GfxOffEntryForceCGCGEn;
+ uint8_t GfxOffEntryForceCGCGDelayEn;
+ uint8_t GfxOffEntryForceCGCGDelayVal; // in microseconds
+
+ uint16_t GfxclkFreqGfxUlv; // in MHz
+ uint8_t GfxIdlePadding2[2];
+ uint32_t GfxOffEntryHysteresis; //For RLC to count after it enters CGCG, and before triggers GFXOFF entry
+ uint32_t GfxoffSpare[15];
+
+ // DFLL
+ uint16_t DfllMstrOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias
+ uint16_t DfllSlvOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias
+ uint32_t DfllBtcMasterScalerM;
+ int32_t DfllBtcMasterScalerB;
+ uint32_t DfllBtcSlaveScalerM;
+ int32_t DfllBtcSlaveScalerB;
+
+ uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
+ uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
+ uint32_t GfxDfllSpare[9];
+
+ // DVO
+ uint32_t DvoPsmDownThresholdVoltage; //Voltage float
+ uint32_t DvoPsmUpThresholdVoltage; //Voltage float
+ uint32_t DvoFmaxLowScaler; //Unitless float
+
+ // GFX DCS
+ uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
+ uint16_t PaddingDcs;
+
+ uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
+ uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
+
+ uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS.
+
+ uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase.
+ uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.
+
+ uint32_t DcsPfGfxFopt; //Default to GFX FMIN
+ uint32_t DcsPfUclkFopt; //Default to UCLK FMIN
+
+ uint8_t FoptEnabled;
+ uint8_t DcsSpare2[3];
+ uint32_t DcsFoptM; //Tuning parameters to shift Fopt calculation, IEEE754 float
+ uint32_t DcsFoptB; //Tuning parameters to shift Fopt calculation, IEEE754 float
+ uint32_t DcsSpare[9];
+
+ // UCLK section
+ uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations
+ uint8_t PaddingMem[3];
+
+ uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 6 Primary SW DPM states (6 + 6 Shadow)
+ uint8_t UclkDpmShadowPstates [NUM_UCLK_DPM_LEVELS]; // 6 Shadow SW DPM states (6 + 6 Shadow)
+ uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
+ uint8_t FreqTableShadowUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
+ uint16_t MemVmempVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
+ uint16_t MemVddioVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
+ uint16_t DalDcModeMaxUclkFreq;
+ uint8_t PaddingsMem[2];
+ //FCLK Section
+ uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 to use the FW-calculated value
+ uint16_t PaddingFclk;
+
+ // Link DPM Settings
+ uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
+ uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
+ uint16_t LclkFreq[NUM_LINK_LEVELS];
+
+ // SECTION: VDD_GFX AVFS
+ uint8_t OverrideGfxAvfsFuses;
+ uint8_t GfxAvfsPadding[3];
+
+ uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
+ uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
+ //uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+ uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+
+ uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
+ uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
+
+ uint32_t SocFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+ uint32_t GfxL2FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+ //uint32_t GfxSeFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+ uint32_t spare_FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+
+ uint32_t Soc_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];
+
+ uint32_t Gfx_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Gfx_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Gfx_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Gfx_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];
+
+ uint32_t Gfx_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES];
+
+ uint32_t dGbV_dT_vmin;
+ uint32_t dGbV_dT_vmax;
+
+ //Unused: PMFW-9370
+ uint32_t V2F_vmin_range_low;
+ uint32_t V2F_vmin_range_high;
+ uint32_t V2F_vmax_range_low;
+ uint32_t V2F_vmax_range_high;
+
+ AvfsDcBtcParams_t DcBtcGfxParams;
+ QuadraticInt_t SSCurve_GFX;
+ uint32_t GfxAvfsSpare[29];
+
+ //SECTION: VDD_SOC AVFS
+ uint8_t OverrideSocAvfsFuses;
+ uint8_t MinSocAvfsRevision;
+ uint8_t SocAvfsPadding[2];
+
+ AvfsFuseOverride_t SocAvfsFuseOverride[AVFS_D_COUNT];
+
+ DroopInt_t dBtcGbSoc[AVFS_D_COUNT]; // GHz->V BtcGb
+
+ LinearInt_t qAgingGb[AVFS_D_COUNT]; // GHz->V
+
+ QuadraticInt_t qStaticVoltageOffset[AVFS_D_COUNT]; // GHz->V
+
+ AvfsDcBtcParams_t DcBtcSocParams[AVFS_D_COUNT];
+
+ QuadraticInt_t SSCurve_SOC;
+ uint32_t SocAvfsSpare[29];
+
+ //SECTION: Boot clock and voltage values
+ BootValues_t BootValues;
+
+ //SECTION: Driver Reported Clocks
+ DriverReportedClocks_t DriverReportedClocks;
+
+ //SECTION: Message Limits
+ MsgLimits_t MsgLimits;
+
+ //SECTION: OverDrive Limits
+ OverDriveLimits_t OverDriveLimitsBasicMin;
+ OverDriveLimits_t OverDriveLimitsBasicMax;
+ OverDriveLimits_t OverDriveLimitsAdvancedMin;
+ OverDriveLimits_t OverDriveLimitsAdvancedMax;
+
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[1];
+ uint16_t TotalBoardPowerRoc;
+
+ //PMFW-11158
+ QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
+ QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
+ QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
+
+ // APT GFX to UCLK mapping
+ int32_t AptUclkGfxclkLookup[POWER_SOURCE_COUNT][6];
+ uint32_t AptUclkGfxclkLookupHyst[POWER_SOURCE_COUNT][6];
+ uint32_t AptPadding;
+
+ // Xvmin didt
+ QuadraticInt_t GfxXvminDidtDroopThresh;
+ uint32_t GfxXvminDidtResetDDWait;
+ uint32_t GfxXvminDidtClkStopWait;
+ uint32_t GfxXvminDidtFcsStepCtrl;
+ uint32_t GfxXvminDidtFcsWaitCtrl;
+
+ // PSM based didt controller
+ uint32_t PsmModeEnabled; //0: all disabled 1: static mode only 2: dynamic mode only 3:static + dynamic mode
+ uint32_t P2v_a; // floating point in U32 format
+ uint32_t P2v_b;
+ uint32_t P2v_c;
+ uint32_t T2p_a;
+ uint32_t T2p_b;
+ uint32_t T2p_c;
+ uint32_t P2vTemp;
+ QuadraticInt_t PsmDidtStaticSettings;
+ QuadraticInt_t PsmDidtDynamicSettings;
+ uint8_t PsmDidtAvgDiv;
+ uint8_t PsmDidtForceStall;
+ uint16_t PsmDidtReleaseTimer;
+ uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog
+ // CAC EDC
+ uint32_t Leakage_C0; // in IEEE float
+ uint32_t Leakage_C1; // in IEEE float
+ uint32_t Leakage_C2; // in IEEE float
+ uint32_t Leakage_C3; // in IEEE float
+ uint32_t Leakage_C4; // in IEEE float
+ uint32_t Leakage_C5; // in IEEE float
+ uint32_t GFX_CLK_SCALAR; // in IEEE float
+ uint32_t GFX_CLK_INTERCEPT; // in IEEE float
+ uint32_t GFX_CAC_M; // in IEEE float
+ uint32_t GFX_CAC_B; // in IEEE float
+ uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float
+ uint32_t DynToTotalCacScalar; // in IEEE
+ // GFX EDC XVMIN
+ uint32_t XVmin_Gfx_EdcThreshScalar;
+ uint32_t XVmin_Gfx_EdcEnableFreq;
+ uint32_t XVmin_Gfx_EdcPccAsStepCtrl;
+ uint32_t XVmin_Gfx_EdcPccAsWaitCtrl;
+ uint16_t XVmin_Gfx_EdcThreshold;
+ uint16_t XVmin_Gfx_EdcFiltHysWaitCtrl;
+ // SOC EDC XVMIN
+ uint32_t XVmin_Soc_EdcThreshScalar;
+ uint32_t XVmin_Soc_EdcEnableFreq;
+ uint32_t XVmin_Soc_EdcThreshold; // LPF: number of cycles Xvmin_trig_filt will react.
+ uint16_t XVmin_Soc_EdcStepUpTime; // 10 bit, refclk count to step up throttle when PCC remains asserted.
+ uint16_t XVmin_Soc_EdcStepDownTime;// 10 bit, refclk count to step down throttle when PCC remains asserted.
+ uint8_t XVmin_Soc_EdcInitPccStep; // 3 bit, first PCC step number that will be applied when PCC asserts.
+ uint8_t PaddingSocEdc[3];
+
+ // Fuse Override for SOC and GFX XVMIN
+ uint8_t GfxXvminFuseOverride;
+ uint8_t SocXvminFuseOverride;
+ uint8_t PaddingXvminFuseOverride[2];
+ uint8_t GfxXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value
+ uint8_t GfxXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value
+ uint8_t SocXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value
+ uint8_t SocXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value
+
+
+ uint16_t GfxXvminFddVolt0; // low voltage, in VID
+ uint16_t GfxXvminFddVolt1; // mid voltage, in VID
+ uint16_t GfxXvminFddVolt2; // high voltage, in VID
+ uint16_t SocXvminFddVolt0; // low voltage, in VID
+ uint16_t SocXvminFddVolt1; // mid voltage, in VID
+ uint16_t SocXvminFddVolt2; // high voltage, in VID
+ uint16_t GfxXvminDsFddDsm[6]; // XVMIN DS, same organization with fuse
+ uint16_t GfxXvminEdcFddDsm[6];// XVMIN GFX EDC, same organization with fuse
+ uint16_t SocXvminEdcFddDsm[6];// XVMIN SOC EDC, same organization with fuse
+
+ // SECTION: Sku Reserved
+ uint32_t Spare;
+
+ // Padding for MMHUB - do not modify this
+ uint32_t MmHubPadding[8];
+} SkuTable_t;
+
+typedef struct {
+ uint8_t SlewRateConditions;
+ uint8_t LoadLineAdjust;
+ uint8_t VoutOffset;
+ uint8_t VidMax;
+ uint8_t VidMin;
+ uint8_t TenBitTelEn;
+ uint8_t SixteenBitTelEn;
+ uint8_t OcpThresh;
+ uint8_t OcpWarnThresh;
+ uint8_t OcpSettings;
+ uint8_t VrhotThresh;
+ uint8_t OtpThresh;
+ uint8_t UvpOvpDeltaRef;
+ uint8_t PhaseShed;
+ uint8_t Padding[10];
+ uint32_t SettingOverrideMask;
+} Svi3RegulatorSettings_t;
+
+typedef struct {
+ // SECTION: Version
+ uint32_t Version; //should be unique to each board type
+
+ // SECTION: I2C Control
+ I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS];
+
+ //SECTION SVI3 Board Parameters
+ uint8_t SlaveAddrMapping[SVI_PLANE_COUNT];
+ uint8_t VrPsiSupport[SVI_PLANE_COUNT];
+
+ uint32_t Svi3SvcSpeed;
+ uint8_t EnablePsi6[SVI_PLANE_COUNT]; // only applicable in SVI3
+
+ // SECTION: Voltage Regulator Settings
+ Svi3RegulatorSettings_t Svi3RegSettings[SVI_PLANE_COUNT];
+
+ // SECTION: GPIO Settings
+ uint8_t LedOffGpio;
+ uint8_t FanOffGpio;
+ uint8_t GfxVrPowerStageOffGpio;
+
+ uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching
+ uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching
+ uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event
+ uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event
+
+ uint8_t GthrGpio; // GPIO pin configured for GTHR Event
+ uint8_t GthrPolarity; // replace GPIO polarity for GTHR
+
+ // LED Display Settings
+ uint8_t LedPin0; // GPIO number for LedPin[0]
+ uint8_t LedPin1; // GPIO number for LedPin[1]
+ uint8_t LedPin2; // GPIO number for LedPin[2]
+ uint8_t LedEnableMask;
+
+ uint8_t LedPcie; // GPIO number for PCIE results
+ uint8_t LedError; // GPIO number for Error Cases
+ uint8_t PaddingLed;
+
+ // SECTION: Clock Spread Spectrum
+
+ // UCLK Spread Spectrum
+ uint8_t UclkTrainingModeSpreadPercent; // Q4.4
+ uint8_t UclkSpreadPadding;
+ uint16_t UclkSpreadFreq; // kHz
+
+ // UCLK Spread Spectrum
+ uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT];
+
+ // DFLL Spread Spectrum
+ uint8_t GfxclkSpreadEnable;
+
+ // FCLK Spread Spectrum
+ uint8_t FclkSpreadPercent; // Q4.4
+ uint16_t FclkSpreadFreq; // kHz
+
+ // Section: Memory Config
+ uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
+ uint8_t PaddingMem1[7];
+
+ // SECTION: UMC feature flags
+ uint8_t HsrEnabled;
+ uint8_t VddqOffEnabled;
+ uint8_t PaddingUmcFlags[2];
+
+ uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
+ uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
+
+ uint8_t FuseWritePowerMuxPresent;
+ uint8_t FuseWritePadding[3];
+
+ // SECTION: EDC Params
+ uint32_t LoadlineGfx;
+ uint32_t LoadlineSoc;
+ uint32_t GfxEdcLimit;
+ uint32_t SocEdcLimit;
+
+ uint32_t RestBoardPower; //power consumed by board that is not captured by the SVI3 input telemetry
+ uint32_t ConnectorsImpedance; // impedance of the input ATX power connectors
+
+ uint8_t EpcsSens0; //GPIO number for External Power Connector Support Sense0
+ uint8_t EpcsSens1; //GPIO Number for External Power Connector Support Sense1
+ uint8_t PaddingEpcs[2];
+
+ // SECTION: Board Reserved
+ uint32_t BoardSpare[52];
+
+ // SECTION: Structure Padding
+
+ // Padding for MMHUB - do not modify this
+ uint32_t MmHubPadding[8];
+} BoardTable_t;
+
+typedef struct {
+ // SECTION: Infrastructure Limits
+ uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in AC mode. Multiple limits supported
+
+ uint16_t VrTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with VR regulator maximum temperature
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
+ uint16_t TemperatureLimit[TEMP_COUNT]; // In degrees Celsius. Temperature limit associated with each input
+
+ // SECTION: Fan Control
+ uint16_t FanStopTemp[TEMP_COUNT]; //Celsius
+ uint16_t FanStartTemp[TEMP_COUNT]; //Celsius
+
+ uint16_t FanGain[TEMP_COUNT];
+
+ uint16_t FanPwmMin;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
+ uint16_t FanMaximumRpm;
+ uint16_t MGpuAcousticLimitRpmThreshold;
+ uint16_t FanTargetGfxclk;
+ uint32_t TempInputSelectMask;
+ uint8_t FanZeroRpmEnable;
+ uint8_t FanTachEdgePerRev;
+ uint16_t FanPadding;
+ uint16_t FanTargetTemperature[TEMP_COUNT];
+
+ // The following are AFC override parameters. Leave at 0 to use FW defaults.
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t FuzzyFan_Reserved;
+
+ uint16_t FwCtfLimit[TEMP_COUNT];
+
+ uint16_t IntakeTempEnableRPM;
+ int16_t IntakeTempOffsetTemp;
+ uint16_t IntakeTempReleaseTemp;
+ uint16_t IntakeTempHighIntakeAcousticLimit;
+
+ uint16_t IntakeTempAcouticLimitReleaseRate;
+ int16_t FanAbnormalTempLimitOffset; // FanStalledTempLimitOffset
+ uint16_t FanStalledTriggerRpm; //
+ uint16_t FanAbnormalTriggerRpmCoeff; // FanAbnormalTriggerRpm
+
+ uint16_t FanSpare[1];
+ uint8_t FanIntakeSensorSupport;
+ uint8_t FanIntakePadding;
+ uint32_t FanAmbientPerfBoostThreshold;
+ uint32_t FanSpare2[12];
+
+ uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
+ uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
+ uint16_t TemperatureFwCtfLimit_Hynix;
+ uint16_t TemperatureFwCtfLimit_Micron;
+
+ // SECTION: Board Reserved
+ uint16_t PlatformTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with platform maximum temperature per VR current rail
+ uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in DC mode. Multiple limits supported
+ uint16_t SocketPowerLimitSmartShift2; // In Watts. Power limit used by SmartShift
+ uint16_t CustomSkuSpare16b;
+ uint32_t CustomSkuSpare32b[10];
+
+ // SECTION: Structure Padding
+
+ // Padding for MMHUB - do not modify this
+ uint32_t MmHubPadding[8];
+} CustomSkuTable_t;
+
+typedef struct {
+ PFE_Settings_t PFE_Settings;
+ SkuTable_t SkuTable;
+ CustomSkuTable_t CustomSkuTable;
+ BoardTable_t BoardTable;
+} PPTable_t;
+
+typedef struct {
+ // Time constant parameters for clock averages in ms
+ uint16_t GfxclkAverageLpfTau;
+ uint16_t FclkAverageLpfTau;
+ uint16_t UclkAverageLpfTau;
+ uint16_t GfxActivityLpfTau;
+ uint16_t UclkActivityLpfTau;
+ uint16_t UclkMaxActivityLpfTau;
+ uint16_t SocketPowerLpfTau;
+ uint16_t VcnClkAverageLpfTau;
+ uint16_t VcnUsageAverageLpfTau;
+ uint16_t PcieActivityLpTau;
+} DriverSmuConfig_t;
+
+typedef struct {
+ DriverSmuConfig_t DriverSmuConfig;
+
+ uint32_t Spare[8];
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+} DriverSmuConfigExternal_t;
+
+
+typedef struct {
+
+ uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
+ uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
+
+ uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
+
+ uint16_t Padding;
+
+ uint32_t Spare[32];
+
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+
+} DriverInfoTable_t;
+
+typedef struct {
+ uint32_t CurrClock[PPCLK_COUNT];
+
+ uint16_t AverageGfxclkFrequencyTarget;
+ uint16_t AverageGfxclkFrequencyPreDs;
+ uint16_t AverageGfxclkFrequencyPostDs;
+ uint16_t AverageFclkFrequencyPreDs;
+ uint16_t AverageFclkFrequencyPostDs;
+ uint16_t AverageMemclkFrequencyPreDs ; // this is scaled to actual memory clock
+ uint16_t AverageMemclkFrequencyPostDs ; // this is scaled to actual memory clock
+ uint16_t AverageVclk0Frequency ;
+ uint16_t AverageDclk0Frequency ;
+ uint16_t AverageVclk1Frequency ;
+ uint16_t AverageDclk1Frequency ;
+ uint16_t PCIeBusy ;
+ uint16_t dGPU_W_MAX ;
+ uint16_t padding ;
+
+ uint32_t MetricsCounter ;
+
+ uint16_t AvgVoltage[SVI_PLANE_COUNT];
+ uint16_t AvgCurrent[SVI_PLANE_COUNT];
+
+ uint16_t AverageGfxActivity ;
+ uint16_t AverageUclkActivity ;
+ uint16_t Vcn0ActivityPercentage ;
+ uint16_t Vcn1ActivityPercentage ;
+
+ uint32_t EnergyAccumulator;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
+ uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t AvgTemperatureFanIntake;
+
+ uint8_t PcieRate ;
+ uint8_t PcieWidth ;
+
+ uint8_t AvgFanPwm;
+ uint8_t Padding[1];
+ uint16_t AvgFanRpm;
+
+
+ uint8_t ThrottlingPercentage[THROTTLER_COUNT];
+ uint8_t padding1[3];
+
+ // Metrics for D3hot entry/exit and driver ARM msgs
+ uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
+ uint32_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
+ uint32_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];
+
+ uint16_t ApuSTAPMSmartShiftLimit;
+ uint16_t ApuSTAPMLimit;
+ uint16_t AvgApuSocketPower;
+
+ uint16_t AverageUclkActivity_MAX;
+
+ uint32_t PublicSerialNumberLower;
+ uint32_t PublicSerialNumberUpper;
+
+} SmuMetrics_t;
+
+typedef struct {
+ SmuMetrics_t SmuMetrics;
+ uint32_t Spare[30];
+
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SmuMetricsExternal_t;
+
+typedef struct {
+ uint8_t WmSetting;
+ uint8_t Flags;
+ uint8_t Padding[2];
+
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+
+typedef enum {
+ WATERMARKS_CLOCK_RANGE = 0,
+ WATERMARKS_DUMMY_PSTATE,
+ WATERMARKS_MALL,
+ WATERMARKS_COUNT,
+} WATERMARKS_FLAGS_e;
+
+typedef struct {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES];
+} Watermarks_t;
+
+typedef struct {
+ Watermarks_t Watermarks;
+ uint32_t Spare[16];
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} WatermarksExternal_t;
+
+typedef struct {
+ uint16_t avgPsmCount[76];
+ uint16_t minPsmCount[76];
+ uint16_t maxPsmCount[76];
+ float avgPsmVoltage[76];
+ float minPsmVoltage[76];
+ float maxPsmVoltage[76];
+} AvfsDebugTable_t;
+
+typedef struct {
+ AvfsDebugTable_t AvfsDebugTable;
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} AvfsDebugTableExternal_t;
+
+
+typedef struct {
+ uint8_t Gfx_ActiveHystLimit;
+ uint8_t Gfx_IdleHystLimit;
+ uint8_t Gfx_FPS;
+ uint8_t Gfx_MinActiveFreqType;
+ uint8_t Gfx_BoosterFreqType;
+ uint8_t PaddingGfx;
+ uint16_t Gfx_MinActiveFreq; // MHz
+ uint16_t Gfx_BoosterFreq; // MHz
+ uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms
+ uint32_t Gfx_PD_Data_limit_a; // Q16
+ uint32_t Gfx_PD_Data_limit_b; // Q16
+ uint32_t Gfx_PD_Data_limit_c; // Q16
+ uint32_t Gfx_PD_Data_error_coeff; // Q16
+ uint32_t Gfx_PD_Data_error_rate_coeff; // Q16
+
+ uint8_t Fclk_ActiveHystLimit;
+ uint8_t Fclk_IdleHystLimit;
+ uint8_t Fclk_FPS;
+ uint8_t Fclk_MinActiveFreqType;
+ uint8_t Fclk_BoosterFreqType;
+ uint8_t PaddingFclk;
+ uint16_t Fclk_MinActiveFreq; // MHz
+ uint16_t Fclk_BoosterFreq; // MHz
+ uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms
+ uint32_t Fclk_PD_Data_limit_a; // Q16
+ uint32_t Fclk_PD_Data_limit_b; // Q16
+ uint32_t Fclk_PD_Data_limit_c; // Q16
+ uint32_t Fclk_PD_Data_error_coeff; // Q16
+ uint32_t Fclk_PD_Data_error_rate_coeff; // Q16
+
+ uint32_t Mem_UpThreshold_Limit[NUM_UCLK_DPM_LEVELS]; // Q16
+ uint8_t Mem_UpHystLimit[NUM_UCLK_DPM_LEVELS];
+ uint16_t Mem_DownHystLimit[NUM_UCLK_DPM_LEVELS];
+ uint16_t Mem_Fps;
+
+} DpmActivityMonitorCoeffInt_t;
+
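The PD-controller limit and coefficient fields above are annotated Q16, i.e. 16.16 fixed point with 16 fractional bits. A minimal conversion sketch under that assumption; the helper names are illustrative, not part of the driver:

#include <stdint.h>

/* Q16: 1.0 is represented as 0x00010000. */
static inline uint32_t q16_from_double(double v)
{
	return (uint32_t)(v * 65536.0 + 0.5);	/* round to nearest */
}

static inline double q16_to_double(uint32_t q)
{
	return (double)q / 65536.0;
}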
+
+typedef struct {
+ DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt;
+ uint32_t MmHubPadding[8]; // SMU internal use
+} DpmActivityMonitorCoeffIntExternal_t;
+
+
+
+// Workload bits
+#define WORKLOAD_PPLIB_DEFAULT_BIT 0
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
+#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
+#define WORKLOAD_PPLIB_VIDEO_BIT 3
+#define WORKLOAD_PPLIB_VR_BIT 4
+#define WORKLOAD_PPLIB_COMPUTE_BIT 5
+#define WORKLOAD_PPLIB_CUSTOM_BIT 6
+#define WORKLOAD_PPLIB_WINDOW_3D_BIT 7
+#define WORKLOAD_PPLIB_DIRECT_ML_BIT 8
+#define WORKLOAD_PPLIB_CGVDI_BIT 9
+#define WORKLOAD_PPLIB_COUNT 10
+
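The WORKLOAD_PPLIB_*_BIT values above are bit positions, not masks, so a workload mask is built by shifting. A sketch of that construction, assuming the usual driver convention of handing a mask to the firmware (the helper itself is illustrative):

/* Assumes the WORKLOAD_PPLIB_*_BIT defines above are in scope. */
static inline unsigned int workload_mask(unsigned int workload_bit)
{
	/* e.g. workload_mask(WORKLOAD_PPLIB_COMPUTE_BIT) == 1u << 5 */
	return 1u << workload_bit;
}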
+
+// These defines are used with the following messages:
+// SMC_MSG_TransferTableDram2Smu
+// SMC_MSG_TransferTableSmu2Dram
+
+// Table transfer status
+#define TABLE_TRANSFER_OK 0x0
+#define TABLE_TRANSFER_FAILED 0xFF
+#define TABLE_TRANSFER_PENDING 0xAB
+
+// Table types
+#define TABLE_PPTABLE 0
+#define TABLE_COMBO_PPTABLE 1
+#define TABLE_WATERMARKS 2
+#define TABLE_AVFS_PSM_DEBUG 3
+#define TABLE_PMSTATUSLOG 4
+#define TABLE_SMU_METRICS 5
+#define TABLE_DRIVER_SMU_CONFIG 6
+#define TABLE_ACTIVITY_MONITOR_COEFF 7
+#define TABLE_OVERDRIVE 8
+#define TABLE_I2C_COMMANDS 9
+#define TABLE_DRIVER_INFO 10
+#define TABLE_ECCINFO 11
+#define TABLE_CUSTOM_SKUTABLE 12
+#define TABLE_COUNT 13
+
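The three transfer-status codes suggest a poll-until-not-pending pattern around SMC_MSG_TransferTableDram2Smu / SMC_MSG_TransferTableSmu2Dram. A hedged sketch of that loop; read_transfer_status() is hypothetical, since this header does not define how the status word is read back:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t read_transfer_status(void);	/* hypothetical accessor */

/* Assumes the TABLE_TRANSFER_* defines above are in scope. */
static bool table_transfer_done_ok(void)
{
	uint32_t status;

	do {
		status = read_transfer_status();
	} while (status == TABLE_TRANSFER_PENDING);

	return status == TABLE_TRANSFER_OK;
}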
+//IH Interrupt ID
+#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
+#define IH_INTERRUPT_CONTEXT_ID_BACO 0x2
+#define IH_INTERRUPT_CONTEXT_ID_AC 0x3
+#define IH_INTERRUPT_CONTEXT_ID_DC 0x4
+#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
+#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
+#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
+#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 5bb7a63c0602..97522c085258 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -144,6 +144,37 @@ typedef struct {
uint32_t MaxGfxClk;
} DpmClocks_t;
+//Freq in MHz
+//Voltage in millivolts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+ uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+ uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t Vcn0ClkLevelsEnabled; //Applies to both Vclk0 and Dclk0
+ uint8_t Vcn1ClkLevelsEnabled; //Applies to both Vclk1 and Dclk1
+ uint8_t VpeClkLevelsEnabled;
+ uint8_t NumMemPstatesEnabled;
+ uint8_t NumFclkLevelsEnabled;
+ uint8_t spare;
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t_v14_0_1;
+
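Per the comment above DpmClocks_t_v14_0_1, voltage fields carry millivolts with 2 fractional bits. A decode sketch under that assumption (the helper is illustrative):

#include <stdint.h>

static inline double dpm_voltage_to_mv(uint32_t raw)
{
	/* 2 fractional bits: raw 3002 decodes to 750.50 mV */
	return (double)raw / 4.0;
}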
typedef struct {
uint16_t CoreFrequency[16]; //Target core frequency [MHz]
uint16_t CorePower[16]; //CAC calculated core power [mW]
@@ -224,7 +255,7 @@ typedef enum {
#define TABLE_CUSTOM_DPM 2 // Called by Driver
#define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS
#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS
-#define TABLE_SPARE0 5 // Unused
+#define TABLE_MOMENTARY_PM 5 // Called by Tools
#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
#define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF
#define TABLE_COUNT 8
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 7b812b9994d7..0b3c2f54a343 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xB
+#define SMU_METRICS_TABLE_VERSION 0xC
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -223,6 +223,10 @@ typedef struct __attribute__((packed, aligned(4))) {
// VCN/JPEG ACTIVITY
uint32_t VcnBusy[4];
uint32_t JpegBusy[32];
+
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
} MetricsTableX_t;
typedef struct __attribute__((packed, aligned(4))) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
index 356e0f57a426..ddb625860083 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
@@ -42,7 +42,7 @@
#define FEATURE_EDC_BIT 7
#define FEATURE_PLL_POWER_DOWN_BIT 8
#define FEATURE_VDDOFF_BIT 9
-#define FEATURE_VCN_DPM_BIT 10
+#define FEATURE_VCN_DPM_BIT 10 /* this is for both VCN0 and VCN1 */
#define FEATURE_DS_MPM_BIT 11
#define FEATURE_FCLK_DPM_BIT 12
#define FEATURE_SOCCLK_DPM_BIT 13
@@ -56,9 +56,9 @@
#define FEATURE_DS_GFXCLK_BIT 21
#define FEATURE_DS_SOCCLK_BIT 22
#define FEATURE_DS_LCLK_BIT 23
-#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks
+#define FEATURE_LOW_POWER_DCNCLKS_BIT 24
#define FEATURE_DS_SHUBCLK_BIT 25
-#define FEATURE_SPARE0_BIT 26 //SPARE
+#define FEATURE_RESERVED0_BIT 26
#define FEATURE_ZSTATES_BIT 27
#define FEATURE_IOMMUL2_PG_BIT 28
#define FEATURE_DS_FCLK_BIT 29
@@ -66,8 +66,8 @@
#define FEATURE_DS_MP1CLK_BIT 31
#define FEATURE_WHISPER_MODE_BIT 32
#define FEATURE_SMU_LOW_POWER_BIT 33
-#define FEATURE_SMART_L3_RINSER_BIT 34
-#define FEATURE_SPARE1_BIT 35 //SPARE
+#define FEATURE_RESERVED1_BIT 34 /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */
+#define FEATURE_GFX_DEM_BIT 35 /* v14_0_0 SPARE; v14_0_1 GFX_DEM */
#define FEATURE_PSI_BIT 36
#define FEATURE_PROCHOT_BIT 37
#define FEATURE_CPUOFF_BIT 38
@@ -77,11 +77,11 @@
#define FEATURE_PERF_LIMIT_BIT 42
#define FEATURE_CORE_DLDO_BIT 43
#define FEATURE_DVO_BIT 44
-#define FEATURE_DS_VCN_BIT 45
+#define FEATURE_DS_VCN_BIT 45 /* v14_0_1 this is for both VCN0 and VCN1 */
#define FEATURE_CPPC_BIT 46
#define FEATURE_CPPC_PREFERRED_CORES 47
#define FEATURE_DF_CSTATES_BIT 48
-#define FEATURE_SPARE2_BIT 49 //SPARE
+#define FEATURE_FAST_PSTATE_CLDO_BIT 49 /* v14_0_0 SPARE */
#define FEATURE_ATHUB_PG_BIT 50
#define FEATURE_VDDOFF_ECO_BIT 51
#define FEATURE_ZSTATES_ECO_BIT 52
@@ -93,8 +93,8 @@
#define FEATURE_DS_IPUCLK_BIT 58
#define FEATURE_DS_VPECLK_BIT 59
#define FEATURE_VPE_DPM_BIT 60
-#define FEATURE_SPARE_61 61
-#define FEATURE_FP_DIDT 62
+#define FEATURE_SMART_L3_RINSER_BIT 61 /* v14_0_0 SPARE*/
+#define FEATURE_PCC_BIT 62 /* v14_0_0 FP_DIDT v14_0_1 PCC_BIT */
#define NUM_FEATURES 63
// Firmware Header/Footer
@@ -151,6 +151,43 @@ typedef struct {
// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t;
+typedef struct {
+ // MP1_EXT_SCRATCH0
+ uint32_t DpmHandlerID : 8;
+ uint32_t ActivityMonitorID : 8;
+ uint32_t DpmTimerID : 8;
+ uint32_t DpmHubID : 4;
+ uint32_t DpmHubTask : 4;
+ // MP1_EXT_SCRATCH1
+ uint32_t CclkSyncStatus : 8;
+ uint32_t ZstateStatus : 4;
+ uint32_t Cpu1VddOff : 4;
+ uint32_t DstateFun : 4;
+ uint32_t DstateDev : 4;
+ uint32_t GfxOffStatus : 2;
+ uint32_t Cpu0Off : 2;
+ uint32_t Cpu1Off : 2;
+ uint32_t Cpu0VddOff : 2;
+ // MP1_EXT_SCRATCH2
+ uint32_t P2JobHandler :32;
+ // MP1_EXT_SCRATCH3
+ uint32_t PostCode :32;
+ // MP1_EXT_SCRATCH4
+ uint32_t MsgPortBusy :15;
+ uint32_t RsmuPmiP1Pending : 1;
+ uint32_t RsmuPmiP2PendingCnt : 8;
+ uint32_t DfCstateExitPending : 1;
+ uint32_t Pc6EntryPending : 1;
+ uint32_t Pc6ExitPending : 1;
+ uint32_t WarmResetPending : 1;
+ uint32_t Mp0ClkPending : 1;
+ uint32_t InWhisperMode : 1;
+ uint32_t spare2 : 2;
+ // MP1_EXT_SCRATCH5
+ uint32_t IdleMask :32;
+ // MP1_EXT_SCRATCH6 = RTOS threads' status
+ // MP1_EXT_SCRATCH7 = RTOS Current Job
+} FwStatus_t_v14_0_1;
#pragma pack(pop)
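Each MP1_EXT_SCRATCHn comment in FwStatus_t_v14_0_1 groups bitfields that sum to exactly 32 bits, so the struct should span six 32-bit scratch registers (SCRATCH6/7 are documented but carry no fields). A compile-time check of that shape, assuming C11 and the packed layout above, might look like:

#include <assert.h>
#include <stdint.h>

static_assert(sizeof(FwStatus_t_v14_0_1) == 6 * sizeof(uint32_t),
	      "FwStatus_t_v14_0_1 should map MP1_EXT_SCRATCH0..5");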
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
index 8a8a57c56bc0..c4dc5881d8df 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -54,14 +54,14 @@
#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
-#define PPSMC_MSG_SPARE0 0x04 ///< SPARE
-#define PPSMC_MSG_SPARE1 0x05 ///< SPARE
-#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN
-#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default
-#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display
+#define PPSMC_MSG_PowerDownVcn1 0x04 ///< Power down VCN1
+#define PPSMC_MSG_PowerUpVcn1 0x05 ///< Power up VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_PowerDownVcn0 0x06 ///< Power down VCN0
+#define PPSMC_MSG_PowerUpVcn0 0x07 ///< Power up VCN0; VCN0 is power gated by default
+#define PPSMC_MSG_SetHardMinVcn0 0x08 ///< For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
-#define PPSMC_MSG_SPARE2 0x0A ///< SPARE
-#define PPSMC_MSG_SPARE3 0x0B ///< SPARE
+#define PPSMC_MSG_SetHardMinVcn1 0x0A ///< For wireless display
+#define PPSMC_MSG_SetSoftMinVcn1 0x0B ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1)
#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
@@ -71,36 +71,32 @@
#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
-#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)
-
+#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
-
-#define PPSMC_MSG_spare_0x17 0x17
-#define PPSMC_MSG_spare_0x18 0x18
+#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
+#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
-
#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
-#define PPSMC_MSG_SetSoftMaxVcn 0x1F ///< Set soft max for VCN clocks (VCLK and DCLK)
-#define PPSMC_MSG_spare_0x20 0x20
-#define PPSMC_MSG_PowerDownJpeg 0x21 ///< Power down Jpeg
-#define PPSMC_MSG_PowerUpJpeg 0x22 ///< Power up Jpeg; VCN is power gated by default
-
+#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
+#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
+#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
+#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity
-#define PPSMC_MSG_Reserved 0x26 ///< Not used
-#define PPSMC_MSG_Reserved1 0x27 ///< Not used, previously PPSMC_MSG_RequestActiveWgp
-#define PPSMC_MSG_Reserved2 0x28 ///< Not used, previously PPSMC_MSG_QueryActiveWgp
+#define PPSMC_MSG_PowerDownJpeg1 0x26 ///< Power down Jpeg of VCN1
+#define PPSMC_MSG_PowerUpJpeg1 0x27 ///< Power up Jpeg of VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_SetSoftMaxVcn1 0x28 ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1)
#define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default
#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
-#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
-#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StutterOff mmHub PgEn
#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
@@ -110,7 +106,9 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
-#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages
+#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
+#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
/**
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h
new file mode 100644
index 000000000000..de2e442281ff
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_V14_0_2_PPSMC_H
+#define SMU_V14_0_2_PPSMC_H
+
+#define PPSMC_VERSION 0x1
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+// BASIC
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
+#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
+#define PPSMC_MSG_EnableAllSmuFeatures 0x6
+#define PPSMC_MSG_DisableAllSmuFeatures 0x7
+#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
+#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
+#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
+#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
+#define PPSMC_MSG_GetRunningSmuFeaturesLow 0xC
+#define PPSMC_MSG_GetRunningSmuFeaturesHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xE
+#define PPSMC_MSG_SetDriverDramAddrLow 0xF
+#define PPSMC_MSG_SetToolsDramAddrHigh 0x10
+#define PPSMC_MSG_SetToolsDramAddrLow 0x11
+#define PPSMC_MSG_TransferTableSmu2Dram 0x12
+#define PPSMC_MSG_TransferTableDram2Smu 0x13
+#define PPSMC_MSG_UseDefaultPPTable 0x14
+
+//BACO/BAMACO/BOMACO
+#define PPSMC_MSG_EnterBaco 0x15
+#define PPSMC_MSG_ExitBaco 0x16
+#define PPSMC_MSG_ArmD3 0x17
+#define PPSMC_MSG_BacoAudioD3PME 0x18
+
+//DPM
+#define PPSMC_MSG_SetSoftMinByFreq 0x19
+#define PPSMC_MSG_SetSoftMaxByFreq 0x1A
+#define PPSMC_MSG_SetHardMinByFreq 0x1B
+#define PPSMC_MSG_SetHardMaxByFreq 0x1C
+#define PPSMC_MSG_GetMinDpmFreq 0x1D
+#define PPSMC_MSG_GetMaxDpmFreq 0x1E
+#define PPSMC_MSG_GetDpmFreqByIndex 0x1F
+#define PPSMC_MSG_OverridePcieParameters 0x20
+
+//DramLog Set DramAddr
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x21
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x22
+#define PPSMC_MSG_DramLogSetDramSize 0x23
+#define PPSMC_MSG_SetWorkloadMask 0x24
+
+#define PPSMC_MSG_GetVoltageByDpm 0x25 // Can be removed
+#define PPSMC_MSG_SetVideoFps 0x26 // Can be removed
+#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27
+
+//Power Gating
+#define PPSMC_MSG_AllowGfxOff 0x28
+#define PPSMC_MSG_DisallowGfxOff 0x29
+#define PPSMC_MSG_PowerUpVcn 0x2A
+#define PPSMC_MSG_PowerDownVcn 0x2B
+#define PPSMC_MSG_PowerUpJpeg 0x2C
+#define PPSMC_MSG_PowerDownJpeg 0x2D
+
+//Resets
+#define PPSMC_MSG_PrepareMp1ForUnload 0x2E
+#define PPSMC_MSG_Mode1Reset 0x2F
+
+//Set SystemVirtual DramAddrHigh
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x31
+//ACDC Power Source
+#define PPSMC_MSG_SetPptLimit 0x32
+#define PPSMC_MSG_GetPptLimit 0x33
+#define PPSMC_MSG_ReenableAcDcInterrupt 0x34
+#define PPSMC_MSG_NotifyPowerSource 0x35
+
+//BTC
+#define PPSMC_MSG_RunDcBtc 0x36
+
+// 0x37
+
+//Others
+#define PPSMC_MSG_SetTemperatureInputSelect 0x38 // Can be removed
+#define PPSMC_MSG_SetFwDstatesMask 0x39
+#define PPSMC_MSG_SetThrottlerMask 0x3A
+
+#define PPSMC_MSG_SetExternalClientDfCstateAllow 0x3B
+
+#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x3C
+
+//STB to dram log
+#define PPSMC_MSG_DumpSTBtoDram 0x3D
+#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E
+#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F
+#define PPSMC_MSG_STBtoDramLogSetDramSize 0x40
+#define PPSMC_MSG_SetOBMTraceBufferLogging 0x41
+
+#define PPSMC_MSG_AllowGfxDcs 0x43
+#define PPSMC_MSG_DisallowGfxDcs 0x44
+#define PPSMC_MSG_EnableAudioStutterWA 0x45
+#define PPSMC_MSG_PowerUpUmsch 0x46
+#define PPSMC_MSG_PowerDownUmsch 0x47
+#define PPSMC_MSG_SetDcsArch 0x48
+#define PPSMC_MSG_TriggerVFFLR 0x49
+#define PPSMC_MSG_SetNumBadMemoryPagesRetired 0x4A
+#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B
+#define PPSMC_MSG_SetPriorityDeltaGain 0x4C
+#define PPSMC_MSG_AllowIHHostInterrupt 0x4D
+#define PPSMC_MSG_Mode3Reset 0x4F
+#define PPSMC_Message_Count 0x50
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index a941fdbf78b6..c48214e3dc8e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -115,6 +115,10 @@
__SMU_DUMMY_MAP(PowerDownVcn), \
__SMU_DUMMY_MAP(PowerUpJpeg), \
__SMU_DUMMY_MAP(PowerDownJpeg), \
+ __SMU_DUMMY_MAP(PowerUpJpeg0), \
+ __SMU_DUMMY_MAP(PowerDownJpeg0), \
+ __SMU_DUMMY_MAP(PowerUpJpeg1), \
+ __SMU_DUMMY_MAP(PowerDownJpeg1), \
__SMU_DUMMY_MAP(BacoAudioD3PME), \
__SMU_DUMMY_MAP(ArmD3), \
__SMU_DUMMY_MAP(RunDcBtc), \
@@ -135,6 +139,8 @@
__SMU_DUMMY_MAP(PowerUpSdma), \
__SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \
__SMU_DUMMY_MAP(SetHardMinVcn), \
+ __SMU_DUMMY_MAP(SetHardMinVcn0), \
+ __SMU_DUMMY_MAP(SetHardMinVcn1), \
__SMU_DUMMY_MAP(SetAllowFclkSwitch), \
__SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \
__SMU_DUMMY_MAP(ActiveProcessNotify), \
@@ -150,6 +156,8 @@
__SMU_DUMMY_MAP(SetPhyclkVoltageByFreq), \
__SMU_DUMMY_MAP(SetDppclkVoltageByFreq), \
__SMU_DUMMY_MAP(SetSoftMinVcn), \
+ __SMU_DUMMY_MAP(SetSoftMinVcn0), \
+ __SMU_DUMMY_MAP(SetSoftMinVcn1), \
__SMU_DUMMY_MAP(EnablePostCode), \
__SMU_DUMMY_MAP(GetGfxclkFrequency), \
__SMU_DUMMY_MAP(GetFclkFrequency), \
@@ -161,6 +169,8 @@
__SMU_DUMMY_MAP(SetSoftMaxSocclkByFreq), \
__SMU_DUMMY_MAP(SetSoftMaxFclkByFreq), \
__SMU_DUMMY_MAP(SetSoftMaxVcn), \
+ __SMU_DUMMY_MAP(SetSoftMaxVcn0), \
+ __SMU_DUMMY_MAP(SetSoftMaxVcn1), \
__SMU_DUMMY_MAP(PowerGateMmHub), \
__SMU_DUMMY_MAP(UpdatePmeRestore), \
__SMU_DUMMY_MAP(GpuChangeState), \
@@ -435,4 +445,11 @@ enum smu_feature_mask {
SMU_FEATURE_COUNT,
};
+/* Message category flags */
+#define SMU_MSG_VF_FLAG (1U << 0)
+#define SMU_MSG_RAS_PRI (1U << 1)
+
+/* Firmware capability flags */
+#define SMU_FW_CAP_RAS_PRI (1U << 0)
+
#endif
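SMU_MSG_VF_FLAG and SMU_MSG_RAS_PRI are per-message attribute bits; the smu_v13_0_6 message map later in this patch passes SMU_MSG_RAS_PRI in the MSG_MAP() flags column. A sketch of how a dispatcher might honor them; the gating policy shown is illustrative, not the driver's actual logic:

#include <stdbool.h>
#include <stdint.h>

/* Assumes the SMU_MSG_* flag defines above are in scope; msg_flags
 * stands in for the flags column of the message-mapping table. */
static bool msg_allowed(uint32_t msg_flags, bool vf_mode, bool ras_service)
{
	if (vf_mode && !(msg_flags & SMU_MSG_VF_FLAG))
		return false;	/* VF may only send VF-tagged messages */
	if (ras_service && !(msg_flags & SMU_MSG_RAS_PRI))
		return false;	/* only RAS-priority messages during RAS work */
	return true;
}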
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index a0e5ad0381d6..c2ab336bb530 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -237,7 +237,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu);
int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
-bool smu_v11_0_baco_is_support(struct smu_context *smu);
+int smu_v11_0_get_bamaco_support(struct smu_context *smu);
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index fbd57fa1a004..d9700a3f28d2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -210,7 +210,7 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu);
int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
-bool smu_v13_0_baco_is_support(struct smu_context *smu);
+int smu_v13_0_get_bamaco_support(struct smu_context *smu);
int smu_v13_0_baco_enter(struct smu_context *smu);
int smu_v13_0_baco_exit(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 3f7463c1c1a9..1fc4557e6fb4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -27,7 +27,8 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25
#define FEATURE_MASK(feature) (1ULL << feature)
@@ -38,7 +39,8 @@
#define MP1_SRAM 0x03c00004
/* address block */
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
+#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
#define smnMP1_PUB_CTRL 0x3010d10
#define MAX_DPM_LEVELS 16
@@ -159,7 +161,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu);
int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
enum smu_baco_seq baco_seq);
-bool smu_v14_0_baco_is_support(struct smu_context *smu);
+int smu_v14_0_get_bamaco_support(struct smu_context *smu);
enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h
new file mode 100644
index 000000000000..4a3fde89aed7
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_14_0_2_PPTABLE_H
+#define SMU_14_0_2_PPTABLE_H
+
+
+#pragma pack(push, 1)
+
+#define SMU_14_0_2_TABLE_FORMAT_REVISION 3
+
+// POWERPLAYTABLE::ulPlatformCaps
+#define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC needs to show the Powerplay page.
+#define SMU_14_0_2_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 // This cap indicates whether power source notification is done by SBIOS instead of OS.
+#define SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC 0x4 // This cap indicates whether DC mode notification is done by GPIO pin directly.
+#define SMU_14_0_2_PP_PLATFORM_CAP_BACO 0x8 // This cap indicates whether board supports the BACO circuitry.
+#define SMU_14_0_2_PP_PLATFORM_CAP_MACO 0x10 // This cap indicates whether board supports the MACO circuitry.
+#define SMU_14_0_2_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 // This cap indicates whether board supports the Shadow Pstate.
+#define SMU_14_0_2_PP_PLATFORM_CAP_LEDSUPPORTED 0x40 // This cap indicates whether board supports the LED.
+#define SMU_14_0_2_PP_PLATFORM_CAP_MOBILEOVERDRIVE 0x80 // This cap indicates whether board supports the Mobile Overdrive.
+
+// SMU_14_0_2_PP_THERMALCONTROLLER - Thermal Controller Type
+#define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0
+
+#define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD
+#define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00
+
+enum SMU_14_0_2_OD_SW_FEATURE_CAP
+{
+ SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT = 0,
+ SMU_14_0_2_ODCAP_POWER_MODE = 1,
+ SMU_14_0_2_ODCAP_AUTO_UV_ENGINE = 2,
+ SMU_14_0_2_ODCAP_AUTO_OC_ENGINE = 3,
+ SMU_14_0_2_ODCAP_AUTO_OC_MEMORY = 4,
+ SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE = 5,
+ SMU_14_0_2_ODCAP_MANUAL_AC_TIMING = 6,
+ SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER = 7,
+ SMU_14_0_2_ODCAP_AUTO_SOC_UV = 8,
+ SMU_14_0_2_ODCAP_COUNT = 9,
+};
+
+enum SMU_14_0_2_OD_SW_FEATURE_ID
+{
+ SMU_14_0_2_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, // Auto Fan Acoustic RPM
+ SMU_14_0_2_ODFEATURE_POWER_MODE = 1 << SMU_14_0_2_ODCAP_POWER_MODE, // Optimized GPU Power Mode
+ SMU_14_0_2_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_UV_ENGINE, // Auto Under Volt GFXCLK
+ SMU_14_0_2_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_OC_ENGINE, // Auto Over Clock GFXCLK
+ SMU_14_0_2_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_14_0_2_ODCAP_AUTO_OC_MEMORY, // Auto Over Clock MCLK
+ SMU_14_0_2_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE, // Auto AC Timing Tuning
+ SMU_14_0_2_ODFEATURE_MANUAL_AC_TIMING = 1 << SMU_14_0_2_ODCAP_MANUAL_AC_TIMING, // Manual fine grain AC Timing tuning
+ SMU_14_0_2_ODFEATURE_AUTO_VF_CURVE_OPTIMIZER = 1 << SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER, // Fine grain auto VF curve tuning
+ SMU_14_0_2_ODFEATURE_AUTO_SOC_UV = 1 << SMU_14_0_2_ODCAP_AUTO_SOC_UV, // Auto Under Volt VDDSOC
+};
+
+#define SMU_14_0_2_MAX_ODFEATURE 32 // Maximum Number of OD Features
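Each SMU_14_0_2_ODFEATURE_* mask is defined as 1 << its SMU_14_0_2_ODCAP_* index, so a support test reduces to a single bit test against the cap index. A sketch, assuming a feature bitmask assembled from the enum above:

#include <stdbool.h>
#include <stdint.h>

static inline bool od_cap_supported(uint32_t feature_mask, unsigned int odcap)
{
	/* e.g. od_cap_supported(mask, SMU_14_0_2_ODCAP_POWER_MODE) */
	return (feature_mask & (1U << odcap)) != 0;
}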
+
+enum SMU_14_0_2_OD_SW_FEATURE_SETTING_ID
+{
+ SMU_14_0_2_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT = 0,
+ SMU_14_0_2_ODSETTING_POWER_MODE = 1,
+ SMU_14_0_2_ODSETTING_AUTOUVENGINE = 2,
+ SMU_14_0_2_ODSETTING_AUTOOCENGINE = 3,
+ SMU_14_0_2_ODSETTING_AUTOOCMEMORY = 4,
+ SMU_14_0_2_ODSETTING_ACTIMING = 5,
+ SMU_14_0_2_ODSETTING_MANUAL_AC_TIMING = 6,
+ SMU_14_0_2_ODSETTING_AUTO_VF_CURVE_OPTIMIZER = 7,
+ SMU_14_0_2_ODSETTING_AUTO_SOC_UV = 8,
+ SMU_14_0_2_ODSETTING_COUNT = 9,
+};
+#define SMU_14_0_2_MAX_ODSETTING 64 // Maximum Number of ODSettings
+
+enum SMU_14_0_2_PWRMODE_SETTING
+{
+ SMU_14_0_2_PMSETTING_POWER_LIMIT_QUIET = 0,
+ SMU_14_0_2_PMSETTING_POWER_LIMIT_BALANCE,
+ SMU_14_0_2_PMSETTING_POWER_LIMIT_TURBO,
+ SMU_14_0_2_PMSETTING_POWER_LIMIT_RAGE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_QUIET,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_BALANCE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_TURBO,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_RAGE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_QUIET,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_BALANCE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_TURBO,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_RAGE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_QUIET,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO,
+ SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE,
+};
+#define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings
+
+enum SMU_14_0_2_overdrive_table_id
+{
+ SMU_14_0_2_OVERDRIVE_TABLE_BASIC = 0,
+ SMU_14_0_2_OVERDRIVE_TABLE_ADVANCED = 1,
+ SMU_14_0_2_OVERDRIVE_TABLE_COUNT = 2,
+};
+
+struct smu_14_0_2_overdrive_table
+{
+ uint8_t revision; // Revision = SMU_14_0_2_PP_OVERDRIVE_VERSION
+ uint8_t reserve[3]; // Zero filled field reserved for future use
+ uint8_t cap[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODFEATURE]; // OD feature support flags
+ int32_t max[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // maximum settings
+ int32_t min[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // minimum settings
+ int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings
+};
+
+struct smu_14_0_2_powerplay_table
+{
+ struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen.
+ uint8_t table_revision; // PPGen use only: table_revision = 3
+ uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t).
+ uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t)
+ uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t.
+ uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable.
+ uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t.
+ uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable.
+ uint16_t pmfw_board_table_size; // The size of BoardTable_t.
+ uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable.
+ uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t.
+ uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base
+ uint32_t golden_revision; // PPGen use only: PP Table Revision on the Golden Data Base
+ uint16_t format_id; // PPGen use only: PPTable for different ASICs.
+ uint32_t platform_caps; // POWERPLAYTABLE::ulPlatformCaps
+
+ uint8_t thermal_controller_type; // one of smu_14_0_2_PP_THERMALCONTROLLER
+
+ uint16_t small_power_limit1;
+ uint16_t small_power_limit2;
+ uint16_t boost_power_limit; // For Gemini boards, when the slave adapter is in BACO mode, the master adapter uses this boost power limit instead of the default power limit.
+ uint16_t software_shutdown_temp;
+
+ uint8_t reserve[143]; // Zero filled field reserved for future use
+
+ struct smu_14_0_2_overdrive_table overdrive_table;
+
+ PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes
+};
+
+#pragma pack(pop)
+
+#endif
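The struct comments pin down two layout requirements: pmfw_pptable_start_offset must land at byte offset 6 (a 4-byte atom_common_table_header followed by table_revision and one padding byte), and smc_pptable must start on a 32-byte boundary. Compile-time checks of that shape could read as below; this is a sketch derived from the comments, not part of the patch:

#include <assert.h>
#include <stddef.h>

static_assert(offsetof(struct smu_14_0_2_powerplay_table,
		       pmfw_pptable_start_offset) == 6,
	      "pmfw_pptable_start_offset expected at byte 6");
static_assert(offsetof(struct smu_14_0_2_powerplay_table,
		       smc_pptable) % 32 == 0,
	      "smc_pptable expected on a 32-byte boundary");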
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 0c2d04f978ac..6d334a2aff67 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2387,7 +2387,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
+ .get_bamaco_support = smu_v11_0_get_bamaco_support,
.baco_enter = smu_v11_0_baco_enter,
.baco_exit = smu_v11_0_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 836b1df79928..5a68d365967f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3538,7 +3538,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
+ .get_bamaco_support = smu_v11_0_get_bamaco_support,
.baco_enter = navi10_baco_enter,
.baco_exit = navi10_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 1f18b61884f3..e426f457a017 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -4431,7 +4431,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
+ .get_bamaco_support = smu_v11_0_get_bamaco_support,
.baco_enter = sienna_cichlid_baco_enter,
.baco_exit = sienna_cichlid_baco_exit,
.mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index f6545093bfc1..9d5ab2ea643a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -93,7 +93,7 @@ static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[30];
+ char ucode_prefix[25];
char fw_name[SMU_FW_NAME_LEN];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
@@ -1557,23 +1557,27 @@ int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
-bool smu_v11_0_baco_is_support(struct smu_context *smu)
+int smu_v11_0_get_bamaco_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int bamaco_support = 0;
if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
- return false;
+ return 0;
+
+ if (smu_baco->maco_support)
+ bamaco_support |= MACO_SUPPORT;
/* return true if ASIC is in BACO state already */
if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
- return true;
+ return bamaco_support |= BACO_SUPPORT;
/* Arcturus does not support this bit mask */
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
- return false;
+ return 0;
- return true;
+ return (bamaco_support |= BACO_SUPPORT);
}
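The rework turns a boolean query into a flags word, so callers now test the BACO_SUPPORT/MACO_SUPPORT bits (defined elsewhere in the driver) rather than branching on true/false. A sketch of the consuming side; the helper itself is illustrative:

static bool smu_can_use_bamaco(struct smu_context *smu)
{
	int support = smu_v11_0_get_bamaco_support(smu);

	/* BAMACO requires both plain BACO and the MACO circuitry. */
	return (support & BACO_SUPPORT) && (support & MACO_SUPPORT);
}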
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
@@ -1603,7 +1607,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
- if (amdgpu_runtime_pm == 2)
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
D3HOT_BAMACO_SEQUENCE,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index da1f43999d09..379e44eb0019 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -301,7 +301,7 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
+ *value = metrics->UvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) /
@@ -1507,6 +1507,12 @@ static int vangogh_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index f41ac6465f2a..ce941fbb9cfb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -759,8 +759,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
switch (type) {
case SMU_OD_SCLK:
- *offset += sysfs_emit_at(buf, *offset, "%s:\n", "GFXCLK");
- fallthrough;
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK");
+ *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->gfxclk_pstate.curr.min,
+ pstate_table->gfxclk_pstate.curr.max);
+ return 0;
case SMU_SCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value);
if (ret) {
@@ -788,8 +791,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
- *offset += sysfs_emit_at(buf, *offset, "%s:\n", "MCLK");
- fallthrough;
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK");
+ *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->uclk_pstate.curr.min,
+ pstate_table->uclk_pstate.curr.max);
+ return 0;
case SMU_MCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value);
if (ret) {
@@ -850,7 +856,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
}
switch (type) {
- case SMU_OD_SCLK:
case SMU_SCLK:
for (i = 0; i < display_levels; i++) {
clock_mhz = freq_values[i];
@@ -863,7 +868,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
}
break;
- case SMU_OD_MCLK:
case SMU_MCLK:
case SMU_SOCCLK:
case SMU_FCLK:
@@ -1581,11 +1585,11 @@ out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}
-static bool aldebaran_is_baco_supported(struct smu_context *smu)
+static int aldebaran_get_bamaco_support(struct smu_context *smu)
{
/* aldebaran does not support baco */
- return false;
+ return 0;
}
static int aldebaran_set_df_cstate(struct smu_context *smu,
@@ -2059,7 +2063,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.register_irq_handler = smu_v13_0_register_irq_handler,
.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = aldebaran_is_baco_supported,
+ .get_bamaco_support = aldebaran_get_bamaco_support,
.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 48170bb5112e..a8d34adc7d3f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -93,7 +93,7 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
char fw_name[30];
- char ucode_prefix[30];
+ char ucode_prefix[15];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -2247,7 +2247,7 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu,
if (state == SMU_BACO_STATE_ENTER) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
- (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
BACO_SEQ_BAMACO : BACO_SEQ_BACO,
NULL);
} else {
@@ -2268,33 +2268,36 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu,
return ret;
}
-bool smu_v13_0_baco_is_support(struct smu_context *smu)
+int smu_v13_0_get_bamaco_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int bamaco_support = 0;
if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
- return false;
+ return 0;
+
+ if (smu_baco->maco_support)
+ bamaco_support |= MACO_SUPPORT;
/* return true if ASIC is in BACO state already */
if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
- return true;
+ return bamaco_support |= BACO_SUPPORT;
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
- return false;
+ return 0;
- return true;
+ return (bamaco_support |= BACO_SUPPORT);
}
int smu_v13_0_baco_enter(struct smu_context *smu)
{
- struct smu_baco_context *smu_baco = &smu->smu_baco;
struct amdgpu_device *adev = smu->adev;
int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
return smu_v13_0_baco_set_armd3_sequence(smu,
- (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
BACO_SEQ_BAMACO : BACO_SEQ_BACO);
} else {
ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 9c03296f92cd..1e09d5f2d82f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_PrepareMp1ForUnload,
+ 0x55, NULL);
+
+ if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+ ret = smu_v13_0_disable_pmfw_state(smu);
+
break;
default:
/* Ignore others */
@@ -3070,7 +3076,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.deep_sleep_control = smu_v13_0_deep_sleep_control,
.gfx_ulv_control = smu_v13_0_gfx_ulv_control,
- .baco_is_support = smu_v13_0_baco_is_support,
+ .get_bamaco_support = smu_v13_0_get_bamaco_support,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index bb98156b2fa1..88f1a0d878f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && !adev->in_s0ix)
+ if (!en && !adev->in_s0ix) {
+ /* Add a GFX reset as a workaround just before sending the
+ * MP1_UNLOAD message, to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ }
return ret;
}
@@ -318,7 +328,7 @@ static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
+ *value = metrics->UvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = (metrics->AverageSocketPower << 8) / 1000;
@@ -572,6 +582,12 @@ static int smu_v13_0_4_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v13_0_4_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v13_0_4_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 0dce672ac1b9..218f209c3775 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -286,7 +286,7 @@ static int smu_v13_0_5_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
+ *value = metrics->UvdActivity / 100;
break;
case METRICS_CURR_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) / 1000;
@@ -332,6 +332,12 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = smu_v13_0_5_get_smu_metrics_data(smu,
METRICS_CURR_SOCKETPOWER,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 3957af057d54..4d3eca2fc3f1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -138,13 +138,13 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
- MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -167,10 +167,10 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
- MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, 0),
- MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, 0),
- MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0),
- MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0),
+ MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
};
@@ -1010,8 +1010,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
switch (type) {
case SMU_OD_SCLK:
- size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
- fallthrough;
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->gfxclk_pstate.curr.min,
+ pstate_table->gfxclk_pstate.curr.max);
+ break;
case SMU_SCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
&now);
@@ -1052,8 +1055,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
- size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
- fallthrough;
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ pstate_table->uclk_pstate.curr.min,
+ pstate_table->uclk_pstate.curr.max);
+ break;
case SMU_MCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
&now);
@@ -1670,6 +1676,11 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
if (clk_type == SMU_UCLK) {
if (max == pstate_table->uclk_pstate.curr.max)
return 0;
+ /* For VF, only allowed in FW versions 85.102 or greater */
+ if (amdgpu_sriov_vf(adev) &&
+ ((smu->smc_fw_version < 0x556600) ||
+ (adev->flags & AMD_IS_APU)))
+ return -EOPNOTSUPP;
/* Only max clock limiting is allowed for UCLK */
ret = smu_v13_0_set_soft_freq_limited_range(
smu, SMU_UCLK, 0, max);
@@ -2077,11 +2088,11 @@ static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
adev->unique_id = pptable->PublicSerialNumber_AID;
}
-static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
+static int smu_v13_0_6_get_bamaco_support(struct smu_context *smu)
{
/* smu_13_0_6 does not support baco */
- return false;
+ return 0;
}
static const char *const throttling_logging_label[] = {
@@ -2228,7 +2239,15 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
- if (!amdgpu_sriov_vf(adev)) {
+ /* Check the SMU version: for SMU version 85.99.0 or higher, PCIe link
+ * speed and width are reported from the PMFW metrics table for both the
+ * PF and one VF; otherwise they are reported only for the PF, from
+ * registers.
+ */
+ if (smu->smc_fw_version >= 0x556300) {
+ gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
+ gpu_metrics->pcie_link_speed =
+ pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
+ } else if (!amdgpu_sriov_vf(adev)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
if (link_width_level > MAX_LINK_WIDTH)
link_width_level = 0;
@@ -2238,6 +2257,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
}
+
gpu_metrics->pcie_bandwidth_acc =
SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
gpu_metrics->pcie_bandwidth_inst =
@@ -2294,6 +2314,17 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return sizeof(*gpu_metrics);
}
+static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(adev->pdev, i * 4,
+ adev->pdev->saved_config_space[i]);
+ pci_restore_msi_state(adev->pdev);
+}
+
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -2315,6 +2346,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
+ /* Certain platforms have switches which assign virtual BAR values to
+ * devices. The OS uses the virtual BAR values while the device behind
+ * the switch is assigned another BAR value. When the device's config
+ * space registers are queried, the switch returns the virtual BAR
+ * values. When a mode-2 reset is performed, the switch is unaware of
+ * it and will continue to return the same virtual values to the OS.
+ * This defeats the pci_restore_config_space() API, as it doesn't write
+ * the saved value if the current value read from config space already
+ * matches it. As a workaround, always restore the config space.
+ */
+ if (!(adev->flags & AMD_IS_APU))
+ smu_v13_0_6_restore_pci_config(smu);
+
dev_dbg(smu->adev->dev, "wait for reset ack\n");
do {
ret = smu_cmn_wait_for_response(smu);
@@ -2671,6 +2716,11 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
umc_v12_0_is_correctable_error(adev, status0))
*count = (ext_error_code == 0) ? odecc_err_cnt : 1;
+ amdgpu_umc_update_ecc_status(adev,
+ entry->regs[MCA_REG_IDX_STATUS],
+ entry->regs[MCA_REG_IDX_IPID],
+ entry->regs[MCA_REG_IDX_ADDR]);
+
return 0;
}
@@ -2684,7 +2734,8 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st
ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
- if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0)
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
+ (ext_error_code == 0 || ext_error_code == 9))
*count = err_cnt;
else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
*count = err_cnt;
@@ -2975,7 +3026,7 @@ static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
return smu_v13_0_6_mca_set_debug_mode(smu, enable);
}
-static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count)
+static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_smu_type type, u32 *count)
{
uint32_t msg;
int ret;
@@ -2984,10 +3035,10 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err
return -EINVAL;
switch (type) {
- case ACA_ERROR_TYPE_UE:
+ case ACA_SMU_TYPE_UE:
msg = SMU_MSG_QueryValidMcaCount;
break;
- case ACA_ERROR_TYPE_CE:
+ case ACA_SMU_TYPE_CE:
msg = SMU_MSG_QueryValidMcaCeCount;
break;
default:
@@ -3004,14 +3055,14 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err
}
static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
- enum aca_error_type type, u32 *count)
+ enum aca_smu_type type, u32 *count)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
switch (type) {
- case ACA_ERROR_TYPE_UE:
- case ACA_ERROR_TYPE_CE:
+ case ACA_SMU_TYPE_UE:
+ case ACA_SMU_TYPE_CE:
ret = smu_v13_0_6_get_valid_aca_count(smu, type, count);
break;
default:
@@ -3022,16 +3073,16 @@ static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
return ret;
}
-static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
int idx, int offset, u32 *val)
{
uint32_t msg, param;
switch (type) {
- case ACA_ERROR_TYPE_UE:
+ case ACA_SMU_TYPE_UE:
msg = SMU_MSG_McaBankDumpDW;
break;
- case ACA_ERROR_TYPE_CE:
+ case ACA_SMU_TYPE_CE:
msg = SMU_MSG_McaBankCeDumpDW;
break;
default:
@@ -3043,7 +3094,7 @@ static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_t
return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val);
}
-static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
int idx, int offset, u32 *val, int count)
{
int ret, i;
@@ -3060,7 +3111,7 @@ static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_typ
return 0;
}
-static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type,
+static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_smu_type type,
int idx, int reg_idx, u64 *val)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3077,13 +3128,13 @@ static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type typ
*val = (u64)data[1] << 32 | data[0];
dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
- type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
+ type == ACA_SMU_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
return 0;
}
static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
- enum aca_error_type type, int idx, struct aca_bank *bank)
+ enum aca_smu_type type, int idx, struct aca_bank *bank)
{
int i, ret, count;
@@ -3097,12 +3148,25 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
return 0;
}
+static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
+{
+ int error_code;
+
+ if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
+ error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
+ else
+ error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
+
+ return error_code & 0xff;
+}
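The 0x00555600 gate appears to follow the same per-byte packing as the other version literals in this series, i.e. PMFW 85.86.0. A decode sketch, purely for illustration:

	u32 fw = adev->pm.fw_version;	/* e.g. 0x00555600 */
	u8 major = (fw >> 16) & 0xff;	/* 0x55 = 85 */
	u8 minor = (fw >> 8) & 0xff;	/* 0x56 = 86 */
	u8 rev = fw & 0xff;		/* 0x00 */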
+
static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
.max_ue_bank_count = 12,
.max_ce_bank_count = 12,
.set_debug_mode = aca_smu_set_debug_mode,
.get_valid_aca_count = aca_smu_get_valid_aca_count,
.get_valid_aca_bank = aca_smu_get_valid_aca_bank,
+ .parse_error_code = aca_smu_parse_error_code,
};
static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
@@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
.setup_pptable = smu_v13_0_6_setup_pptable,
- .baco_is_support = smu_v13_0_6_is_baco_supported,
+ .get_bamaco_support = smu_v13_0_6_get_bamaco_support,
.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
@@ -3208,6 +3272,7 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
+ smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
smu_v13_0_set_smu_mailbox_registers(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 7318964f1f14..e996a0a4d33e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2650,7 +2650,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
- .baco_is_support = smu_v13_0_baco_is_support,
+ .get_bamaco_support = smu_v13_0_get_bamaco_support,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 2d1736234b4a..d8bcf765a803 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -363,7 +363,7 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
+ *value = metrics->UvdActivity / 100;
break;
case METRICS_CURR_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) / 1000;
@@ -423,6 +423,12 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = yellow_carp_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = yellow_carp_get_smu_metrics_data(smu,
METRICS_CURR_SOCKETPOWER,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
index ddbac5c655f7..4593e29e8ff8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o
+SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o smu_v14_0_2_ppt.o
AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index b06a3cc43305..68b9bf822e8d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -38,8 +38,13 @@
#include "amdgpu_ras.h"
#include "smu_cmn.h"
-#include "asic_reg/mp/mp_14_0_0_offset.h"
-#include "asic_reg/mp/mp_14_0_0_sh_mask.h"
+#include "asic_reg/mp/mp_14_0_2_offset.h"
+#include "asic_reg/mp/mp_14_0_2_sh_mask.h"
+
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -52,6 +57,7 @@
#undef pr_debug
MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
@@ -106,7 +112,6 @@ void smu_v14_0_fini_microcode(struct smu_context *smu)
int smu_v14_0_load_microcode(struct smu_context *smu)
{
-#if 0
struct amdgpu_device *adev = smu->adev;
const uint32_t *src;
const struct smc_firmware_header_v1_0 *hdr;
@@ -131,8 +136,13 @@ int smu_v14_0_load_microcode(struct smu_context *smu)
1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
for (i = 0; i < adev->usec_timeout; i++) {
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
break;
@@ -142,9 +152,7 @@ int smu_v14_0_load_microcode(struct smu_context *smu)
if (i == adev->usec_timeout)
return -ETIME;
-#endif
return 0;
-
}
int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
@@ -165,6 +173,10 @@ int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3)))
+ return 0;
+
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
@@ -198,7 +210,12 @@ int smu_v14_0_check_fw_status(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
@@ -227,16 +244,16 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
adev->pm.fw_version = smu_version;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(14, 0, 2):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
- break;
case IP_VERSION(14, 0, 0):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
-
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
amdgpu_ip_version(adev, MP1_HWIP, 0));
@@ -738,9 +755,9 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
struct amdgpu_device *adev = smu->adev;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 2):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -841,9 +858,16 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
// TODO
/* For MP1 SW irqs */
- val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
- val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
- WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
break;
case AMDGPU_IRQ_STATE_ENABLE:
@@ -851,14 +875,26 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
// TODO
/* For MP1 SW irqs */
- val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
- val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
- val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
- WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
-
- val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
- val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
- WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
break;
default:
@@ -868,11 +904,32 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
return 0;
}
+#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
+#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+
static int smu_v14_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- // TODO
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t client_id = entry->client_id;
+ uint32_t src_id = entry->src_id;
+
+ if (client_id == SOC15_IH_CLIENTID_THM) {
+ switch (src_id) {
+ case THM_11_0__SRCID__THM_DIG_THERM_L2H:
+ schedule_delayed_work(&smu->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ break;
+ case THM_11_0__SRCID__THM_DIG_THERM_H2L:
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
+ break;
+ default:
+ dev_emerg(adev->dev, "ERROR: GPU thermal interrupt with unknown src id (%d)\n",
+ src_id);
+ break;
+ }
+ }
return 0;
}
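The low-to-high thermal interrupt only schedules deferred work; the actual software-CTF handling runs later in process context. A sketch of the pattern, assuming the handler name used elsewhere in the swsmu core:

	/* At init time (done in the common SMU code; handler name assumed): */
	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	/* In the hard-irq path, as above: defer, don't act directly: */
	schedule_delayed_work(&smu->swctf_delayed_work,
			      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));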
@@ -894,7 +951,17 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu)
irq_src->num_types = 1;
irq_src->funcs = &smu_v14_0_irq_funcs;
- // TODO: THM related
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_L2H,
+ irq_src);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_H2L,
+ irq_src);
+ if (ret)
+ return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
SMU_IH_INTERRUPT_ID_TO_DRIVER,
@@ -1402,9 +1469,22 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
if (adev->vcn.harvest_config & (1 << i))
continue;
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- i << 16U, NULL);
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
+ i << 16U, NULL);
+ else if (i == 1)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ i << 16U, NULL);
+ }
+
if (ret)
return ret;
}
@@ -1415,9 +1495,34 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
bool enable)
{
- return smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
- 0, NULL);
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
+ i << 16U, NULL);
+ else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
+ i << 16U, NULL);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
}
int smu_v14_0_run_btc(struct smu_context *smu)
@@ -1552,23 +1657,27 @@ int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
return 0;
}
-bool smu_v14_0_baco_is_support(struct smu_context *smu)
+int smu_v14_0_get_bamaco_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int bamaco_support = 0;
if (amdgpu_sriov_vf(smu->adev) ||
!smu_baco->platform_support)
- return false;
+ return 0;
+
+ if (smu_baco->maco_support)
+ bamaco_support |= MACO_SUPPORT;
/* return true if ASIC is in BACO state already */
if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
- return true;
+ return (bamaco_support |= BACO_SUPPORT);
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
- return false;
+ return 0;
- return true;
+ return (bamaco_support |= BACO_SUPPORT);
}
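Callers of the renamed hook now receive a capability bitmask instead of a bool; a hedged usage sketch using the BACO_SUPPORT/MACO_SUPPORT flags set above:

	int support = smu_v14_0_get_bamaco_support(smu);

	if (support & MACO_SUPPORT)
		; /* BAMACO sequence is available */
	else if (support & BACO_SUPPORT)
		; /* plain BACO only */
	else
		; /* neither: skip BACO-based runtime PM */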
enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
@@ -1591,7 +1700,7 @@ int smu_v14_0_baco_set_state(struct smu_context *smu,
if (state == SMU_BACO_STATE_ENTER) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
- smu_baco->maco_support ?
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
BACO_SEQ_BAMACO : BACO_SEQ_BACO,
NULL);
} else {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 9310c4758e38..e4419e1561ef 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -70,9 +70,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
- MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
- MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
+ MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0, 1),
+ MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0, 1),
+ MSG_MAP(SetHardMinVcn0, PPSMC_MSG_SetHardMinVcn0, 1),
+ MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1, 1),
+ MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1, 1),
+ MSG_MAP(SetHardMinVcn1, PPSMC_MSG_SetHardMinVcn1, 1),
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
@@ -83,7 +86,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 1),
- MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
+ MSG_MAP(SetSoftMinVcn0, PPSMC_MSG_SetSoftMinVcn0, 1),
+ MSG_MAP(SetSoftMinVcn1, PPSMC_MSG_SetSoftMinVcn1, 1),
MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
@@ -91,9 +95,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
- MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
- MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
- MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
+ MSG_MAP(SetSoftMaxVcn0, PPSMC_MSG_SetSoftMaxVcn0, 1),
+ MSG_MAP(SetSoftMaxVcn1, PPSMC_MSG_SetSoftMaxVcn1, 1),
+ MSG_MAP(PowerDownJpeg0, PPSMC_MSG_PowerDownJpeg0, 1),
+ MSG_MAP(PowerUpJpeg0, PPSMC_MSG_PowerUpJpeg0, 1),
+ MSG_MAP(PowerDownJpeg1, PPSMC_MSG_PowerDownJpeg1, 1),
+ MSG_MAP(PowerUpJpeg1, PPSMC_MSG_PowerUpJpeg1, 1),
MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1),
MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
@@ -154,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -164,7 +171,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+ smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL);
if (!smu_table->clocks_table)
goto err1_out;
@@ -355,6 +362,12 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
@@ -586,6 +599,60 @@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
return ret;
}
+static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t dpm_level,
+ uint32_t *freq)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks0[dpm_level];
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks0[dpm_level];
+ break;
+ case SMU_VCLK1:
+ if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks1[dpm_level];
+ break;
+ case SMU_DCLK1:
+ if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks1[dpm_level];
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumMemPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->MemPstateTable[dpm_level].MemClk;
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumFclkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->FclkClocks_Freq[dpm_level];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
@@ -630,6 +697,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
return 0;
}
+static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t dpm_level,
+ uint32_t *freq)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+
+ return 0;
+}
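Note that the common wrapper discards the per-version return codes and always reports success. A stricter variant (a suggestion, not the merged code) would propagate them:

	static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
							  enum smu_clk_type clk_type,
							  uint32_t dpm_level,
							  uint32_t *freq)
	{
		switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
		case IP_VERSION(14, 0, 0):
			return smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type,
								 dpm_level, freq);
		case IP_VERSION(14, 0, 1):
			return smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type,
								 dpm_level, freq);
		default:
			return -EOPNOTSUPP;
		}
	}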
+
static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
enum smu_clk_type clk_type)
{
@@ -650,6 +730,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
feature_id = SMU_FEATURE_VCN_DPM_BIT;
break;
default:
@@ -659,6 +741,126 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
return smu_cmn_feature_is_enabled(smu, feature_id);
}
+static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+ uint32_t clock_limit;
+ uint32_t max_dpm_level, min_dpm_level;
+ int ret = 0;
+
+ if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* clock in MHz units */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ if (max) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *max = clk_table->MaxGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ max_dpm_level = 0;
+ break;
+ case SMU_SOCCLK:
+ max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ if (ret)
+ goto failed;
+ }
+ }
+
+ if (min) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *min = clk_table->MinGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
+ break;
+ case SMU_FCLK:
+ min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
+ break;
+ case SMU_SOCCLK:
+ min_dpm_level = 0;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ min_dpm_level = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ if (ret)
+ goto failed;
+ }
+ }
+
+failed:
+ return ret;
+}
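The /100 in the fallback path assumes the VBIOS boot clock values are stored in 10 kHz units, which is why dividing by 100 yields MHz; e.g.:

	/* boot_values.gfxclk = 180000 (10 kHz units)
	 * 180000 / 100 = 1800 -> reported as 1800 MHz
	 */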
+
static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
@@ -729,7 +931,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
if (ret)
goto failed;
}
@@ -761,7 +963,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
if (ret)
goto failed;
}
@@ -771,6 +973,19 @@ failed:
return ret;
}
+static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+ return 0;
+}
+
static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
@@ -804,6 +1019,37 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
}
+static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *count)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ *count = clk_table->NumSocClkLevelsEnabled;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ *count = clk_table->Vcn0ClkLevelsEnabled;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ *count = clk_table->Vcn1ClkLevelsEnabled;
+ break;
+ case SMU_MCLK:
+ *count = clk_table->NumMemPstatesEnabled;
+ break;
+ case SMU_FCLK:
+ *count = clk_table->NumFclkLevelsEnabled;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
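One caveat: the default arm above leaves *count untouched, so a caller passing an uninitialized local could consume garbage for unsupported clock types. A defensive variant of that arm (a suggestion, not the merged code):

	default:
		*count = 0;	/* never hand back an uninitialized count */
		break;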
+
static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
@@ -833,6 +1079,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
return 0;
}
+static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *count)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ smu_v14_0_0_get_dpm_level_count(smu, clk_type, count);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ smu_v14_0_1_get_dpm_level_count(smu, clk_type, count);
+
+ return 0;
+}
+
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -859,18 +1117,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_VCLK:
case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
case SMU_MCLK:
case SMU_FCLK:
ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
break;
- ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count);
+ ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count);
if (ret)
break;
for (i = 0; i < count; i++) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
break;
@@ -933,8 +1193,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
- msg_set_min = SMU_MSG_SetHardMinVcn;
- msg_set_max = SMU_MSG_SetSoftMaxVcn;
+ msg_set_min = SMU_MSG_SetHardMinVcn0;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn0;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ msg_set_min = SMU_MSG_SetHardMinVcn1;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
default:
return -EINVAL;
@@ -964,11 +1229,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
case SMU_VCLK:
case SMU_DCLK:
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
break;
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
break;
@@ -993,25 +1258,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
sclk_min = sclk_max;
fclk_min = fclk_max;
socclk_min = socclk_max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
sclk_max = sclk_min;
fclk_max = fclk_min;
socclk_max = socclk_min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -1060,6 +1325,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
return ret;
}
+static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
+ smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1072,6 +1349,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
return 0;
}
+static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu);
+
+ return 0;
+}
+
static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
bool enable)
{
@@ -1088,6 +1375,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+ uint8_t idx;
+
+ /* Only the SOC and VPE clock information is copied, to provide the VPE DPM settings. */
+ for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
+ clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx]:0;
+ clock_table->SocClocks[idx].Vol = 0;
+ }
+
+ for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
+ clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx]:0;
+ clock_table->VPEClocks[idx].Vol = 0;
+ }
+
+ return 0;
+}
+
static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1107,6 +1413,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *
return 0;
}
+static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ smu_14_0_0_get_dpm_table(smu, clock_table);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ smu_14_0_1_get_dpm_table(smu, clock_table);
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1128,16 +1444,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
- .get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
.set_performance_level = smu_v14_0_0_set_performance_level,
- .set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters,
+ .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
- .get_dpm_clock_table = smu_14_0_0_get_dpm_table,
+ .get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
new file mode 100644
index 000000000000..706265220292
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -0,0 +1,1796 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
+#include "smu_v14_0.h"
+#include "smu14_driver_if_v14_0.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "smu_v14_0_2_ppt.h"
+#include "smu_v14_0_2_pptable.h"
+#include "smu_v14_0_2_ppsmc.h"
+#include "mp/mp_14_0_2_offset.h"
+#include "mp/mp_14_0_2_sh_mask.h"
+
+#include "smu_cmn.h"
+#include "amdgpu_ras.h"
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+ FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_FCLK_BIT))
+
+#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
+
+static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
+ MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
+ MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
+ MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
+ MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
+ MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
+ MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
+ MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
+ MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
+ MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
+ MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
+ MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
+ MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
+ MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
+ MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
+ MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
+ MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
+ MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
+ MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0),
+ MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
+ PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
+ MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
+ MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(GFXCLK, PPCLK_GFXCLK),
+ CLK_MAP(SCLK, PPCLK_GFXCLK),
+ CLK_MAP(SOCCLK, PPCLK_SOCCLK),
+ CLK_MAP(FCLK, PPCLK_FCLK),
+ CLK_MAP(UCLK, PPCLK_UCLK),
+ CLK_MAP(MCLK, PPCLK_UCLK),
+ CLK_MAP(VCLK, PPCLK_VCLK_0),
+ CLK_MAP(DCLK, PPCLK_DCLK_0),
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = {
+ FEA_MAP(FW_DATA_READ),
+ FEA_MAP(DPM_GFXCLK),
+ FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
+ FEA_MAP(DPM_UCLK),
+ FEA_MAP(DPM_FCLK),
+ FEA_MAP(DPM_SOCCLK),
+ FEA_MAP(DPM_LINK),
+ FEA_MAP(DPM_DCN),
+ FEA_MAP(VMEMP_SCALING),
+ FEA_MAP(VDDIO_MEM_SCALING),
+ FEA_MAP(DS_GFXCLK),
+ FEA_MAP(DS_SOCCLK),
+ FEA_MAP(DS_FCLK),
+ FEA_MAP(DS_LCLK),
+ FEA_MAP(DS_DCFCLK),
+ FEA_MAP(DS_UCLK),
+ FEA_MAP(GFX_ULV),
+ FEA_MAP(FW_DSTATE),
+ FEA_MAP(GFXOFF),
+ FEA_MAP(BACO),
+ FEA_MAP(MM_DPM),
+ FEA_MAP(SOC_MPCLK_DS),
+ FEA_MAP(BACO_MPCLK_DS),
+ FEA_MAP(THROTTLERS),
+ FEA_MAP(SMARTSHIFT),
+ FEA_MAP(GTHR),
+ FEA_MAP(ACDC),
+ FEA_MAP(VR0HOT),
+ FEA_MAP(FW_CTF),
+ FEA_MAP(FAN_CONTROL),
+ FEA_MAP(GFX_DCS),
+ FEA_MAP(GFX_READ_MARGIN),
+ FEA_MAP(LED_DISPLAY),
+ FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
+ FEA_MAP(OUT_OF_BAND_MONITOR),
+ FEA_MAP(OPTIMIZED_VMIN),
+ FEA_MAP(GFX_IMU),
+ FEA_MAP(BOOT_TIME_CAL),
+ FEA_MAP(GFX_PCC_DFLL),
+ FEA_MAP(SOC_CG),
+ FEA_MAP(DF_CSTATE),
+ FEA_MAP(GFX_EDC),
+ FEA_MAP(BOOT_POWER_OPT),
+ FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
+ FEA_MAP(DS_VCN),
+ FEA_MAP(BACO_CG),
+ FEA_MAP(MEM_TEMP_READ),
+ FEA_MAP(ATHUB_MMHUB_PG),
+ FEA_MAP(SOC_PCC),
+ [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP(PPTABLE),
+ TAB_MAP(WATERMARKS),
+ TAB_MAP(AVFS_PSM_DEBUG),
+ TAB_MAP(PMSTATUSLOG),
+ TAB_MAP(SMU_METRICS),
+ TAB_MAP(DRIVER_SMU_CONFIG),
+ TAB_MAP(ACTIVITY_MONITOR_COEFF),
+ [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
+ TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ECCINFO),
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+ PWR_MAP(AC),
+ PWR_MAP(DC),
+};
+
+static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
+};
+
+#if 0
+static const uint8_t smu_v14_0_2_throttler_map[] = {
+ [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
+ [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
+ [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
+ [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
+ [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
+ [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
+ [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
+ [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
+ [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
+ [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
+ [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
+ [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
+ [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
+ [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT),
+ [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
+};
+#endif
+
+static int
+smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ struct amdgpu_device *adev = smu->adev;
+ /*u32 smu_version;*/
+
+ if (num > 2)
+ return -EINVAL;
+
+ memset(feature_mask, 0xff, sizeof(uint32_t) * num);
+
+ if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ }
+#if 0
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
+ !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
+
+ if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+ /* PMFW 78.58 contains a critical fix for gfxoff feature */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((smu_version < 0x004e3a00) ||
+ !(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
+ if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+ }
+
+ if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+
+ if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ }
+
+ if (!(adev->pm.pp_feature & PP_ULV_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+#endif
+
+ return 0;
+}
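The *(uint64_t *) casts treat the two 32-bit mask words as a single 64-bit value, which matches the feature-bit layout only on little-endian hosts (the platforms amdgpu targets in practice). An endian-neutral alternative, sketched for comparison:

	u64 mask = ((u64)feature_mask[1] << 32) | feature_mask[0];

	mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);

	feature_mask[0] = lower_32_bits(mask);
	feature_mask[1] = upper_32_bits(mask);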
+
+static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_14_0_2_powerplay_table *powerplay_table =
+ table_context->power_play_table;
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMin;
+
+ if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
+ if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) {
+ smu_baco->platform_support = true;
+
+ if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO)
+ smu_baco->maco_support = true;
+ }
+
+ if (!overdrive_lowerlimits->FeatureCtrlMask ||
+ !overdrive_upperlimits->FeatureCtrlMask)
+ smu->od_enabled = false;
+
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
+
+ /*
+ * Instead of keeping its own buffer and getting a copy of the
+ * overdrive table, smu->od_settings simply points at the actual
+ * overdrive_table.
+ */
+ smu->od_settings = &powerplay_table->overdrive_table;
+
+ smu->adev->pm.no_fan =
+ !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
+
+ return 0;
+}
+
+static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_14_0_2_powerplay_table *powerplay_table =
+ table_context->power_play_table;
+
+ memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
+ sizeof(PPTable_t));
+
+ return 0;
+}
+
+#ifndef atom_smc_dpm_info_table_14_0_0
+struct atom_smc_dpm_info_table_14_0_0 {
+ struct atom_common_table_header table_header;
+ BoardTable_t BoardTable;
+};
+#endif
+
+static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table;
+ BoardTable_t *BoardTable = &smc_pptable->BoardTable;
+ int index, ret;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smc_dpm_info);
+
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table);
+ if (ret)
+ return ret;
+
+ memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));
+
+ return 0;
+}
+
+#if 0
+static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_14_0_powerplay_table);
+
+ return 0;
+}
+#endif
+
+static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_14_0_2_powerplay_table);
+
+ return 0;
+}
+
+static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
+ if (!adev->scpm_enabled)
+ ret = smu_v14_0_setup_pptable(smu);
+ else
+ ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
+
+ ret = smu_v14_0_2_store_powerplay_table(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * With SCPM enabled, the operation below will be handled
+ * by PSP. Driver involvement is unnecessary.
+ */
+ if (!adev->scpm_enabled) {
+ ret = smu_v14_0_2_append_powerplay_table(smu);
+ if (ret)
+ return ret;
+ }
+
+ ret = smu_v14_0_2_check_powerplay_table(smu);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v14_0_2_tables_init(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ goto err0_out;
+ smu_table->metrics_time = 0;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err1_out;
+
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ goto err2_out;
+
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+ if (!smu_table->ecc_table)
+ goto err3_out;
+
+ return 0;
+
+err3_out:
+ kfree(smu_table->watermarks_table);
+err2_out:
+ kfree(smu_table->gpu_metrics_table);
+err1_out:
+ kfree(smu_table->metrics_table);
+err0_out:
+ return -ENOMEM;
+}
+
+static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+
+ smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context);
+
+ return 0;
+}
+
+static int smu_v14_0_2_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v14_0_2_tables_init(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_v14_0_2_allocate_dpm_context(smu);
+ if (ret)
+ return ret;
+
+ return smu_v14_0_init_smc_tables(smu);
+}
+
+static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
+{
+ struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ struct smu_14_0_dpm_table *dpm_table;
+ struct smu_14_0_pcie_table *pcie_table;
+ uint32_t link_level;
+ int ret = 0;
+
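+ /*
+ * For each clock domain, read the DPM levels from the PMFW when the
+ * corresponding DPM feature is enabled; otherwise fall back to a
+ * single level at the boot frequency.
+ */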
+ /* socclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_SOCCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* gfxclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_GFXCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+
+ /*
+ * Update the reported maximum shader clock to the value
+ * which can be guaranteed to be achieved on all cards. This
+ * is aligned with the Windows setting. Considering that value
+ * might not be the peak frequency the card can achieve, it
+ * is normal for the real-time clock frequency to exceed this
+ * labelled maximum clock frequency (for example in the
+ * pp_dpm_sclk sysfs output).
+ */
+ if (skutable->DriverReportedClocks.GameClockAc &&
+ (dpm_table->dpm_levels[dpm_table->count - 1].value >
+ skutable->DriverReportedClocks.GameClockAc)) {
+ dpm_table->dpm_levels[dpm_table->count - 1].value =
+ skutable->DriverReportedClocks.GameClockAc;
+ dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+ }
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* uclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_UCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* fclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_FCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* vclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_VCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* dclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
+ ret = smu_v14_0_set_single_dpm_table(smu,
+ SMU_DCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ /* lclk dpm table setup */
+ pcie_table = &dpm_context->dpm_tables.pcie_table;
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ return 0;
+}
+
+static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
+{
+ int ret = 0;
+ uint64_t feature_enabled;
+
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static void smu_v14_0_2_dump_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ PFE_Settings_t *PFEsettings = &pptable->PFE_Settings;
+
+ dev_info(smu->adev->dev, "Dumped PPTable:\n");
+
+ dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version);
+ dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]);
+ dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]);
+}
+
+static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
+{
+ uint32_t throttler_status = 0;
+ int i;
+
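+ /* each nonzero throttling percentage sets the matching status bit */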
+ for (i = 0; i < THROTTLER_COUNT; i++)
+ throttler_status |=
+ (metrics->ThrottlingPercentage[i] ? 1U << i : 0);
+
+ return throttler_status;
+}
+
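+/* busy threshold in %; above it the pre-deep-sleep clock averages are reported */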
+#define SMU_14_0_2_BUSY_THRESHOLD 5
+static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics =
+ &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
+ return ret;
+
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ *value = metrics->CurrClock[PPCLK_GFXCLK];
+ break;
+ case METRICS_CURR_SOCCLK:
+ *value = metrics->CurrClock[PPCLK_SOCCLK];
+ break;
+ case METRICS_CURR_UCLK:
+ *value = metrics->CurrClock[PPCLK_UCLK];
+ break;
+ case METRICS_CURR_VCLK:
+ *value = metrics->CurrClock[PPCLK_VCLK_0];
+ break;
+ case METRICS_CURR_DCLK:
+ *value = metrics->CurrClock[PPCLK_DCLK_0];
+ break;
+ case METRICS_CURR_FCLK:
+ *value = metrics->CurrClock[PPCLK_FCLK];
+ break;
+ case METRICS_CURR_DCEFCLK:
+ *value = metrics->CurrClock[PPCLK_DCFCLK];
+ break;
+ case METRICS_AVERAGE_GFXCLK:
+ if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ *value = metrics->AverageGfxclkFrequencyPostDs;
+ else
+ *value = metrics->AverageGfxclkFrequencyPreDs;
+ break;
+ case METRICS_AVERAGE_FCLK:
+ if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ *value = metrics->AverageFclkFrequencyPostDs;
+ else
+ *value = metrics->AverageFclkFrequencyPreDs;
+ break;
+ case METRICS_AVERAGE_UCLK:
+ if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ *value = metrics->AverageMemclkFrequencyPostDs;
+ else
+ *value = metrics->AverageMemclkFrequencyPreDs;
+ break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->AverageVclk0Frequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->AverageDclk0Frequency;
+ break;
+ case METRICS_AVERAGE_VCLK1:
+ *value = metrics->AverageVclk1Frequency;
+ break;
+ case METRICS_AVERAGE_DCLK1:
+ *value = metrics->AverageDclk1Frequency;
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = metrics->AverageGfxActivity;
+ break;
+ case METRICS_AVERAGE_MEMACTIVITY:
+ *value = metrics->AverageUclkActivity;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
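+ /* report as a 24.8 fixed-point value (left shift by 8) */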
+ *value = metrics->AverageSocketPower << 8;
+ break;
+ case METRICS_TEMPERATURE_EDGE:
+ *value = metrics->AvgTemperature[TEMP_EDGE] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->AvgTemperature[TEMP_HOTSPOT] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_MEM:
+ *value = metrics->AvgTemperature[TEMP_MEM] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_VRGFX:
+ *value = metrics->AvgTemperature[TEMP_VR_GFX] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_VRSOC:
+ *value = metrics->AvgTemperature[TEMP_VR_SOC] *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = smu_v14_0_2_get_throttler_status(metrics);
+ break;
+ case METRICS_CURR_FANSPEED:
+ *value = metrics->AvgFanRpm;
+ break;
+ case METRICS_CURR_FANPWM:
+ *value = metrics->AvgFanPwm;
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
+ break;
+ case METRICS_PCIE_RATE:
+ *value = metrics->PcieRate;
+ break;
+ case METRICS_PCIE_WIDTH:
+ *value = metrics->PcieWidth;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ struct smu_14_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_14_0_dpm_table *dpm_table;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ /* uclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ /* gfxclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ /* socclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ /* fclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ /* vclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ /* dclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ dev_err(smu->adev->dev, "Unsupported clock type!\n");
+ return -EINVAL;
+ }
+
+ if (min)
+ *min = dpm_table->min;
+ if (max)
+ *max = dpm_table->max;
+
+ return 0;
+}
+
+static int smu_v14_0_2_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data,
+ uint32_t *size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ int ret = 0;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
+ *(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_MEMACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_HOTSPOT,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_EDGE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_MEM,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_UCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDGFX,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+ int clk_id = 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return -EINVAL;
+
+ switch (clk_id) {
+ case PPCLK_GFXCLK:
+ member_type = METRICS_AVERAGE_GFXCLK;
+ break;
+ case PPCLK_UCLK:
+ member_type = METRICS_CURR_UCLK;
+ break;
+ case PPCLK_FCLK:
+ member_type = METRICS_CURR_FCLK;
+ break;
+ case PPCLK_SOCCLK:
+ member_type = METRICS_CURR_SOCCLK;
+ break;
+ case PPCLK_VCLK_0:
+ member_type = METRICS_AVERAGE_VCLK;
+ break;
+ case PPCLK_DCLK_0:
+ member_type = METRICS_AVERAGE_DCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return smu_v14_0_2_get_smu_metrics_data(smu,
+ member_type,
+ value);
+}
+
+static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ char *buf)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_14_0_dpm_table *single_dpm_table;
+ uint32_t curr_freq;
+ int i, size = 0;
+ int ret = 0;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
+
+ switch (clk_type) {
+ case SMU_SCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ break;
+ case SMU_MCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
+ break;
+ case SMU_SOCCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.soc_table);
+ break;
+ case SMU_FCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+ break;
+ default:
+ break;
+ }
+
+ switch (clk_type) {
+ case SMU_SCLK:
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get current clock freq!");
+ return ret;
+ }
+
+ if (single_dpm_table->is_fine_grained) {
+ /*
+ * For fine-grained DPM, there are only two dpm levels:
+ * - level 0 -> min clock freq
+ * - level 1 -> max clock freq
+ * And the current clock frequency can be any value between them.
+ * So, if the current clock frequency is not at level 0 or level 1,
+ * we will fake it as three dpm levels:
+ * - level 0 -> min clock freq
+ * - level 1 -> current actual clock freq
+ * - level 2 -> max clock freq
+ */
+ if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
+ (single_dpm_table->dpm_levels[1].value != curr_freq)) {
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n",
+ single_dpm_table->dpm_levels[0].value);
+ size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
+ curr_freq);
+ size += sysfs_emit_at(buf, size, "2: %uMhz\n",
+ single_dpm_table->dpm_levels[1].value);
+ } else {
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
+ single_dpm_table->dpm_levels[0].value,
+ single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ single_dpm_table->dpm_levels[1].value,
+ single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
+ }
+ } else {
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
+ }
+ break;
+ case SMU_PCIE:
+ // TODO
+ break;
+
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_14_0_dpm_table *single_dpm_table;
+ uint32_t soft_min_level, soft_max_level;
+ uint32_t min_freq, max_freq;
+ int ret = 0;
+
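+ /* lowest/highest set bits of the mask select the soft min/max levels */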
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
+ break;
+ case SMU_SOCCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.soc_table);
+ break;
+ case SMU_FCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+ break;
+ default:
+ break;
+ }
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ if (single_dpm_table->is_fine_grained) {
+ /* There are only two levels for fine-grained DPM */
+ soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ } else {
+ if ((soft_max_level >= single_dpm_table->count) ||
+ (soft_min_level >= single_dpm_table->count))
+ return -EINVAL;
+ }
+
+ min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
+ max_freq = single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ clk_type,
+ min_freq,
+ max_freq);
+ break;
+ case SMU_DCEFCLK:
+ case SMU_PCIE:
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_14_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+ for (i = 0; i < pcie_table->num_of_link_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ if (pcie_table->pcie_lane[i] > pcie_width_cap)
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+
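+ /* arg layout: [31:16] link level, [15:8] PCIe gen, [7:0] lane count */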
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ // TODO
+
+ return 0;
+}
+
+static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu)
+{
+ // TODO
+
+ return 0;
+}
+
+static void smu_v14_0_2_get_unique_id(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics =
+ &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t upper32 = 0, lower32 = 0;
+ int ret;
+
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
+ goto out;
+
+ upper32 = metrics->PublicSerialNumberUpper;
+ lower32 = metrics->PublicSerialNumberLower;
+
+out:
+ adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
+}
+
+static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
+{
+ // TODO
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ DpmActivityMonitorCoeffInt_t *activity_monitor =
+ &(activity_monitor_external.DpmActivityMonitorCoeffInt);
+ static const char *title[] = {
+ "PROFILE_INDEX(NAME)",
+ "CLOCK_TYPE(NAME)",
+ "FPS",
+ "MinActiveFreqType",
+ "MinActiveFreq",
+ "BoosterFreqType",
+ "BoosterFreq",
+ "PD_Data_limit_c",
+ "PD_Data_error_coeff",
+ "PD_Data_error_rate_coeff"};
+ int16_t workload_type = 0;
+ uint32_t i, size = 0;
+ int result = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
+ title[0], title[1], title[2], title[3], title[4], title[5],
+ title[6], title[7], title[8], title[9]);
+
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
+ if (workload_type == -ENOTSUPP)
+ continue;
+ else if (workload_type < 0)
+ return -EINVAL;
+
+ result = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ workload_type,
+ (void *)(&activity_monitor_external),
+ false);
+ if (result) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return result;
+ }
+
+ size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 0,
+ "GFXCLK",
+ activity_monitor->Gfx_FPS,
+ activity_monitor->Gfx_MinActiveFreqType,
+ activity_monitor->Gfx_MinActiveFreq,
+ activity_monitor->Gfx_BoosterFreqType,
+ activity_monitor->Gfx_BoosterFreq,
+ activity_monitor->Gfx_PD_Data_limit_c,
+ activity_monitor->Gfx_PD_Data_error_coeff,
+ activity_monitor->Gfx_PD_Data_error_rate_coeff);
+
+ size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 1,
+ "FCLK",
+ activity_monitor->Fclk_FPS,
+ activity_monitor->Fclk_MinActiveFreqType,
+ activity_monitor->Fclk_MinActiveFreq,
+ activity_monitor->Fclk_BoosterFreqType,
+ activity_monitor->Fclk_BoosterFreq,
+ activity_monitor->Fclk_PD_Data_limit_c,
+ activity_monitor->Fclk_PD_Data_error_coeff,
+ activity_monitor->Fclk_PD_Data_error_rate_coeff);
+ }
+
+ return size;
+}
+
+static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
+ long *input,
+ uint32_t size)
+{
+ DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ DpmActivityMonitorCoeffInt_t *activity_monitor =
+ &(activity_monitor_external.DpmActivityMonitorCoeffInt);
+ int workload_type, ret = 0;
+
+ if (input[size] >= PP_SMC_POWER_PROFILE_COUNT) {
+ dev_err(smu->adev->dev, "Invalid power profile mode %ld\n", input[size]);
+ return -EINVAL;
+ }
+
+ smu->power_profile_mode = input[size];
+
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ switch (input[0]) {
+ case 0: /* Gfxclk */
+ activity_monitor->Gfx_FPS = input[1];
+ activity_monitor->Gfx_MinActiveFreqType = input[2];
+ activity_monitor->Gfx_MinActiveFreq = input[3];
+ activity_monitor->Gfx_BoosterFreqType = input[4];
+ activity_monitor->Gfx_BoosterFreq = input[5];
+ activity_monitor->Gfx_PD_Data_limit_c = input[6];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[7];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
+ break;
+ case 1: /* Fclk */
+ activity_monitor->Fclk_FPS = input[1];
+ activity_monitor->Fclk_MinActiveFreqType = input[2];
+ activity_monitor->Fclk_MinActiveFreq = input[3];
+ activity_monitor->Fclk_BoosterFreqType = input[4];
+ activity_monitor->Fclk_BoosterFreq = input[5];
+ activity_monitor->Fclk_PD_Data_limit_c = input[6];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[7];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
+ break;
+ }
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ smu->power_profile_mode);
+ if (workload_type < 0)
+ return -EINVAL;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetWorkloadMask,
+ 1 << workload_type,
+ NULL);
+}
+
+static int smu_v14_0_2_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ return smu_v14_0_baco_set_armd3_sequence(smu,
+ smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ else
+ return smu_v14_0_baco_enter(smu);
+}
+
+static int smu_v14_0_2_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for PMFW handling for the Dstate change */
+ usleep_range(10000, 11000);
+ return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v14_0_baco_exit(smu);
+ }
+}
+
+static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu)
+{
+ // TODO
+
+ return true;
+}
+
+static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msg, int num_msgs)
+{
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
+ int i, j, r, c;
+ u16 dir;
+
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->I2CcontrollerPort = smu_i2c->port;
+ req->I2CSpeed = I2C_SPEED_FAST_400K;
+ req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
+ dir = msg[0].flags & I2C_M_RD;
+
+ for (c = i = 0; i < num_msgs; i++) {
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
+
+ if (!(msg[i].flags & I2C_M_RD)) {
+ /* write */
+ cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
+ cmd->ReadWriteData = msg[i].buf[j];
+ }
+
+ if ((dir ^ msg[i].flags) & I2C_M_RD) {
+ /* The direction changes.
+ */
+ dir = msg[i].flags & I2C_M_RD;
+ cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
+ }
+
+ req->NumCmds++;
+
+ /*
+ * Insert STOP if we are at the last byte of either last
+ * message for the transaction or the client explicitly
+ * requires a STOP at this particular message.
+ */
+ if ((j == msg[i].len - 1) &&
+ ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
+ cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
+ cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
+ }
+ }
+ }
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
+ if (r)
+ goto fail;
+
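+ /* copy the bytes returned for the read messages back to the caller */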
+ for (c = i = 0; i < num_msgs; i++) {
+ if (!(msg[i].flags & I2C_M_RD)) {
+ c += msg[i].len;
+ continue;
+ }
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
+
+ msg[i].buf[j] = cmd->ReadWriteData;
+ }
+ }
+ r = num_msgs;
+fail:
+ kfree(req);
+ return r;
+}
+
+static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm smu_v14_0_2_i2c_algo = {
+ .master_xfer = smu_v14_0_2_i2c_xfer,
+ .functionality = smu_v14_0_2_i2c_func,
+};
+
+static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
+ .max_read_len = MAX_SW_I2C_COMMANDS,
+ .max_write_len = MAX_SW_I2C_COMMANDS,
+ .max_comb_1st_msg_len = 2,
+ .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
+};
+
+static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &smu_v14_0_2_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &smu_v14_0_2_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
+
+ /* assign the buses used for the FRU EEPROM and RAS EEPROM */
+ /* XXX ideally this would be something in a vbios data table */
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ return res;
+}
+
+static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
+}
+
+static int smu_v14_0_2_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ break;
+ default:
+ /* Ignore others */
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_2_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DFCstateControl,
+ state,
+ NULL);
+}
+
+static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ // TODO
+
+ return ret;
+}
+
+static int smu_v14_0_2_mode2_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ // TODO
+
+ return ret;
+}
+
+static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2))
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
+ FEATURE_PWR_GFX, NULL);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
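+ /* MP1 C2PMSG registers used as the driver-to-PMFW mailbox */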
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90);
+}
+
+static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu,
+ uint32_t size)
+{
+ int ret = 0;
+
+ /* message SMU to update the bad page number on SMUBUS */
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetNumBadMemoryPagesRetired,
+ size, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "[%s] failed to message SMU to update bad memory pages number\n",
+ __func__);
+
+ return ret;
+}
+
+static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu,
+ uint32_t size)
+{
+ int ret = 0;
+
+ /* message SMU to update the bad channel info on SMUBUS */
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
+ size, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "[%s] failed to message SMU to update bad memory pages channel info\n",
+ __func__);
+
+ return ret;
+}
+
+static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ int ret = 0;
+
+ // TODO
+
+ return ret;
+}
+
+static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
+ .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
+ .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
+ .i2c_init = smu_v14_0_2_i2c_control_init,
+ .i2c_fini = smu_v14_0_2_i2c_control_fini,
+ .is_dpm_running = smu_v14_0_2_is_dpm_running,
+ .dump_pptable = smu_v14_0_2_dump_pptable,
+ .init_microcode = smu_v14_0_init_microcode,
+ .load_microcode = smu_v14_0_load_microcode,
+ .fini_microcode = smu_v14_0_fini_microcode,
+ .init_smc_tables = smu_v14_0_2_init_smc_tables,
+ .fini_smc_tables = smu_v14_0_fini_smc_tables,
+ .init_power = smu_v14_0_init_power,
+ .fini_power = smu_v14_0_fini_power,
+ .check_fw_status = smu_v14_0_check_fw_status,
+ .setup_pptable = smu_v14_0_2_setup_pptable,
+ .check_fw_version = smu_v14_0_check_fw_version,
+ .write_pptable = smu_cmn_write_pptable,
+ .set_driver_table_location = smu_v14_0_set_driver_table_location,
+ .system_features_control = smu_v14_0_system_features_control,
+ .set_allowed_mask = smu_v14_0_set_allowed_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
+ .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable,
+ .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable,
+ .get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq,
+ .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
+ .read_sensor = smu_v14_0_2_read_sensor,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .print_clk_levels = smu_v14_0_2_print_clk_levels,
+ .force_clk_levels = smu_v14_0_2_force_clk_levels,
+ .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters,
+ .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range,
+ .register_irq_handler = smu_v14_0_register_irq_handler,
+ .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
+ .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
+ .init_pptable_microcode = smu_v14_0_init_pptable_microcode,
+ .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
+ .set_performance_level = smu_v14_0_set_performance_level,
+ .gfx_off_control = smu_v14_0_gfx_off_control,
+ .get_unique_id = smu_v14_0_2_get_unique_id,
+ .get_power_limit = smu_v14_0_2_get_power_limit,
+ .set_power_limit = smu_v14_0_set_power_limit,
+ .set_power_source = smu_v14_0_set_power_source,
+ .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
+ .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
+ .run_btc = smu_v14_0_run_btc,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .set_tool_table_location = smu_v14_0_set_tool_table_location,
+ .deep_sleep_control = smu_v14_0_deep_sleep_control,
+ .gfx_ulv_control = smu_v14_0_gfx_ulv_control,
+ .get_bamaco_support = smu_v14_0_get_bamaco_support,
+ .baco_get_state = smu_v14_0_baco_get_state,
+ .baco_set_state = smu_v14_0_baco_set_state,
+ .baco_enter = smu_v14_0_2_baco_enter,
+ .baco_exit = smu_v14_0_2_baco_exit,
+ .mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported,
+ .mode1_reset = smu_v14_0_2_mode1_reset,
+ .mode2_reset = smu_v14_0_2_mode2_reset,
+ .enable_gfx_features = smu_v14_0_2_enable_gfx_features,
+ .set_mp1_state = smu_v14_0_2_set_mp1_state,
+ .set_df_cstate = smu_v14_0_2_set_df_cstate,
+ .send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num,
+ .send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag,
+ .gpo_control = smu_v14_0_gpo_control,
+ .get_ecc_info = smu_v14_0_2_get_ecc_info,
+};
+
+void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &smu_v14_0_2_ppt_funcs;
+ smu->message_map = smu_v14_0_2_message_map;
+ smu->clock_map = smu_v14_0_2_clk_map;
+ smu->feature_map = smu_v14_0_2_feature_mask_map;
+ smu->table_map = smu_v14_0_2_table_map;
+ smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
+ smu->workload_map = smu_v14_0_2_workload_map;
+ smu_v14_0_2_set_smu_mailbox_registers(smu);
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h
new file mode 100644
index 000000000000..b83729e5d6f9
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V14_0_2_PPT_H__
+#define __SMU_V14_0_2_PPT_H__
+
+extern void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index b8dbd4e25348..6d1c3af927ca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -235,6 +235,50 @@ static void __smu_cmn_send_msg(struct smu_context *smu,
WREG32(smu->msg_reg, msg);
}
+static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu,
+ enum smu_message_type msg)
+{
+ return smu->message_map[msg].flags;
+}
+
+static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
+ enum smu_message_type msg, bool *poll)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t flags, resp;
+ bool fed_status;
+
+ flags = __smu_cmn_get_msg_flags(smu, msg);
+ *poll = true;
+
+ /* When a RAS fatal error has occurred, the FW processes only
+ * RAS priority messages; reject all others.
+ */
+ fed_status = amdgpu_ras_get_fed_status(adev);
+ if (fed_status) {
+ if (!(flags & SMU_MSG_RAS_PRI)) {
+ dev_dbg(adev->dev,
+ "RAS error detected, skip sending %s",
+ smu_get_message_name(smu, msg));
+ return -EACCES;
+ }
+
+ /* FW will ignore non-priority messages when a RAS fatal error
+ * is detected. Hence a previous message may not have received a
+ * response. Continue without polling the response status for
+ * priority messages.
+ */
+ resp = RREG32(smu->resp_reg);
+ dev_dbg(adev->dev,
+ "Sending RAS priority message %s response status: %x",
+ smu_get_message_name(smu, msg), resp);
+ if (resp == 0)
+ *poll = false;
+ }
+
+ return 0;
+}
+
static int __smu_cmn_send_debug_msg(struct smu_context *smu,
u32 msg,
u32 param)
@@ -354,6 +398,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
int res, index;
+ bool poll = true;
u32 reg;
if (adev->no_hw_access)
@@ -366,12 +411,20 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
return index == -EACCES ? 0 : index;
mutex_lock(&smu->message_lock);
- reg = __smu_cmn_poll_stat(smu);
- res = __smu_cmn_reg2errno(smu, reg);
- if (reg == SMU_RESP_NONE ||
- res == -EREMOTEIO) {
- __smu_cmn_reg_print_error(smu, reg, index, param, msg);
- goto Out;
+
+ if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
+ res = __smu_cmn_ras_filter_msg(smu, msg, &poll);
+ if (res)
+ goto Out;
+ }
+
+ if (poll) {
+ reg = __smu_cmn_poll_stat(smu);
+ res = __smu_cmn_reg2errno(smu, reg);
+ if (reg == SMU_RESP_NONE || res == -EREMOTEIO) {
+ __smu_cmn_reg_print_error(smu, reg, index, param, msg);
+ goto Out;
+ }
}
__smu_cmn_send_msg(smu, (uint16_t) index, param);
reg = __smu_cmn_poll_stat(smu);
@@ -437,7 +490,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return -EINVAL;
if (amdgpu_sriov_vf(smu->adev) &&
- !msg_mapping.valid_in_vf)
+ !(msg_mapping.flags & SMU_MSG_VF_FLAG))
return -EACCES;
return msg_mapping.map_to;
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 42510fdea27e..67e5d3b4190f 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -4,6 +4,8 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
+
+#include <linux/seq_file.h>
#include "d71_dev.h"
#include "komeda_kms.h"
#include "malidp_io.h"
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index 4b7d94961527..00f5864a0495 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -5,6 +5,7 @@
*
*/
#include <linux/of.h>
+#include <linux/seq_file.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 626709bec6f5..2577f0cef8fc 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -72,7 +72,10 @@ static void malidp_mw_connector_reset(struct drm_connector *connector)
__drm_atomic_helper_connector_destroy_state(connector->state);
kfree(connector->state);
- __drm_atomic_helper_connector_reset(connector, &mw_state->base);
+ connector->state = NULL;
+
+ if (mw_state)
+ __drm_atomic_helper_connector_reset(connector, &mw_state->base);
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index 29f4b52e3c8d..a763349dd89f 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -5,6 +5,7 @@
*/
#include <linux/ctype.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 5a53ce51fb24..d794c076bc24 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -3,6 +3,14 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o ast_dp.o
+ast-y := \
+ ast_ddc.o \
+ ast_dp501.o \
+ ast_dp.o \
+ ast_drv.o \
+ ast_main.o \
+ ast_mm.o \
+ ast_mode.o \
+ ast_post.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_ddc.c
index e5d3f7121de4..29cf5d157f34 100644
--- a/drivers/gpu/drm/ast/ast_i2c.c
+++ b/drivers/gpu/drm/ast/ast_ddc.c
@@ -21,20 +21,31 @@
* of the Software.
*/
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
+#include "ast_ddc.h"
#include "ast_drv.h"
-static void ast_i2c_setsda(void *i2c_priv, int data)
+struct ast_ddc {
+ struct ast_device *ast;
+
+ struct i2c_algo_bit_data bit;
+ struct i2c_adapter adapter;
+};
+
+static void ast_ddc_algo_bit_data_setsda(void *data, int state)
{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_device *ast = to_ast_device(i2c->dev);
+ struct ast_ddc *ddc = data;
+ struct ast_device *ast = ddc->ast;
int i;
u8 ujcrb7, jtemp;
for (i = 0; i < 0x10000; i++) {
- ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+ ujcrb7 = ((state & 0x01) ? 0 : 1) << 2;
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf1, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x04);
if (ujcrb7 == jtemp)
@@ -42,15 +53,15 @@ static void ast_i2c_setsda(void *i2c_priv, int data)
}
}
-static void ast_i2c_setscl(void *i2c_priv, int clock)
+static void ast_ddc_algo_bit_data_setscl(void *data, int state)
{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_device *ast = to_ast_device(i2c->dev);
+ struct ast_ddc *ddc = data;
+ struct ast_device *ast = ddc->ast;
int i;
u8 ujcrb7, jtemp;
for (i = 0; i < 0x10000; i++) {
- ujcrb7 = ((clock & 0x01) ? 0 : 1);
+ ujcrb7 = ((state & 0x01) ? 0 : 1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf4, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x01);
if (ujcrb7 == jtemp)
@@ -58,10 +69,32 @@ static void ast_i2c_setscl(void *i2c_priv, int clock)
}
}
-static int ast_i2c_getsda(void *i2c_priv)
+static int ast_ddc_algo_bit_data_pre_xfer(struct i2c_adapter *adapter)
+{
+ struct ast_ddc *ddc = i2c_get_adapdata(adapter);
+ struct ast_device *ast = ddc->ast;
+
+ /*
+ * Protect access to I/O registers from concurrent modesetting
+ * by acquiring the I/O-register lock.
+ */
+ mutex_lock(&ast->modeset_lock);
+
+ return 0;
+}
+
+static void ast_ddc_algo_bit_data_post_xfer(struct i2c_adapter *adapter)
+{
+ struct ast_ddc *ddc = i2c_get_adapdata(adapter);
+ struct ast_device *ast = ddc->ast;
+
+ mutex_unlock(&ast->modeset_lock);
+}
+
+static int ast_ddc_algo_bit_data_getsda(void *data)
{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_device *ast = to_ast_device(i2c->dev);
+ struct ast_ddc *ddc = data;
+ struct ast_device *ast = ddc->ast;
uint32_t val, val2, count, pass;
count = 0;
@@ -80,10 +113,10 @@ static int ast_i2c_getsda(void *i2c_priv)
return val & 1 ? 1 : 0;
}
-static int ast_i2c_getscl(void *i2c_priv)
+static int ast_ddc_algo_bit_data_getscl(void *data)
{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_device *ast = to_ast_device(i2c->dev);
+ struct ast_ddc *ddc = data;
+ struct ast_device *ast = ddc->ast;
uint32_t val, val2, count, pass;
count = 0;
@@ -102,50 +135,53 @@ static int ast_i2c_getscl(void *i2c_priv)
return val & 1 ? 1 : 0;
}
-static void ast_i2c_release(struct drm_device *dev, void *res)
+static void ast_ddc_release(struct drm_device *dev, void *res)
{
- struct ast_i2c_chan *i2c = res;
+ struct ast_ddc *ddc = res;
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
+ i2c_del_adapter(&ddc->adapter);
}
-struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+struct i2c_adapter *ast_ddc_create(struct ast_device *ast)
{
- struct ast_i2c_chan *i2c;
+ struct drm_device *dev = &ast->base;
+ struct ast_ddc *ddc;
+ struct i2c_adapter *adapter;
+ struct i2c_algo_bit_data *bit;
int ret;
- i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
- if (!i2c)
- return NULL;
-
- i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.dev.parent = dev->dev;
- i2c->dev = dev;
- i2c_set_adapdata(&i2c->adapter, i2c);
- snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
- "AST i2c bit bus");
- i2c->adapter.algo_data = &i2c->bit;
-
- i2c->bit.udelay = 20;
- i2c->bit.timeout = 2;
- i2c->bit.data = i2c;
- i2c->bit.setsda = ast_i2c_setsda;
- i2c->bit.setscl = ast_i2c_setscl;
- i2c->bit.getsda = ast_i2c_getsda;
- i2c->bit.getscl = ast_i2c_getscl;
- ret = i2c_bit_add_bus(&i2c->adapter);
+ ddc = drmm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL);
+ if (!ddc)
+ return ERR_PTR(-ENOMEM);
+ ddc->ast = ast;
+
+ bit = &ddc->bit;
+ bit->data = ddc;
+ bit->setsda = ast_ddc_algo_bit_data_setsda;
+ bit->setscl = ast_ddc_algo_bit_data_setscl;
+ bit->getsda = ast_ddc_algo_bit_data_getsda;
+ bit->getscl = ast_ddc_algo_bit_data_getscl;
+ bit->pre_xfer = ast_ddc_algo_bit_data_pre_xfer;
+ bit->post_xfer = ast_ddc_algo_bit_data_post_xfer;
+ bit->udelay = 20;
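+ /* allow ~2.2 ms for SCL clock stretching */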
+ bit->timeout = usecs_to_jiffies(2200);
+
+ adapter = &ddc->adapter;
+ adapter->owner = THIS_MODULE;
+ adapter->algo_data = bit;
+ adapter->dev.parent = dev->dev;
+ snprintf(adapter->name, sizeof(adapter->name), "AST DDC bus");
+ i2c_set_adapdata(adapter, ddc);
+
+ ret = i2c_bit_add_bus(adapter);
if (ret) {
drm_err(dev, "Failed to register bit i2c\n");
- goto out_kfree;
+ return ERR_PTR(ret);
}
- ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c);
+ ret = drmm_add_action_or_reset(dev, ast_ddc_release, ddc);
if (ret)
- return NULL;
- return i2c;
+ return ERR_PTR(ret);
-out_kfree:
- kfree(i2c);
- return NULL;
+ return &ddc->adapter;
}
diff --git a/drivers/gpu/drm/ast/ast_ddc.h b/drivers/gpu/drm/ast/ast_ddc.h
new file mode 100644
index 000000000000..85c93edc9ae1
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_ddc.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __AST_DDC_H__
+#define __AST_DDC_H__
+
+struct ast_device;
+struct i2c_adapter;
+
+struct i2c_adapter *ast_ddc_create(struct ast_device *ast);
+
+#endif
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index ebb6d8ebd44e..1e9259416980 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
+ u32 i = 0;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
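+ // give up after ~200 ms if the sink never mirrors the new state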
+ if (++i > 200)
+ break;
}
}
}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 90bcb1eb9cd9..f8c49ba68e78 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -27,6 +27,7 @@
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <drm/drm_aperture.h>
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 3be5ccf1f5f4..ba3d86973995 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -28,8 +28,6 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
#include <linux/types.h>
@@ -149,37 +147,9 @@ static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
}
/*
- * Connector with i2c channel
+ * BMC
*/
-struct ast_i2c_chan {
- struct i2c_adapter adapter;
- struct drm_device *dev;
- struct i2c_algo_bit_data bit;
-};
-
-struct ast_vga_connector {
- struct drm_connector base;
- struct ast_i2c_chan *i2c;
-};
-
-static inline struct ast_vga_connector *
-to_ast_vga_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct ast_vga_connector, base);
-}
-
-struct ast_sil164_connector {
- struct drm_connector base;
- struct ast_i2c_chan *i2c;
-};
-
-static inline struct ast_sil164_connector *
-to_ast_sil164_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct ast_sil164_connector, base);
-}
-
struct ast_bmc_connector {
struct drm_connector base;
struct drm_connector *physical_connector;
@@ -222,11 +192,11 @@ struct ast_device {
struct {
struct {
struct drm_encoder encoder;
- struct ast_vga_connector vga_connector;
+ struct drm_connector connector;
} vga;
struct {
struct drm_encoder encoder;
- struct ast_sil164_connector sil164_connector;
+ struct drm_connector connector;
} sil164;
struct {
struct drm_encoder encoder;
@@ -498,9 +468,6 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
-/* ast_i2c.c */
-struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
-
/* aspeed DP */
bool ast_astdp_is_connected(struct ast_device *ast);
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 2f3ad5f949fc..0637abb70361 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -26,6 +26,7 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
+#include <linux/of.h>
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a718646a66b8..6695af70768f 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -43,9 +43,11 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include "ast_ddc.h"
#include "ast_drv.h"
#include "ast_tables.h"
@@ -700,12 +702,29 @@ static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x1, 0xdf, 0x20);
}
+static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+
+ if (plane->state && plane->state->fb && ast_plane->vaddr) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr);
+ return 0;
+ }
+ return -ENODEV;
+}
+
static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_primary_plane_helper_atomic_check,
.atomic_update = ast_primary_plane_helper_atomic_update,
.atomic_enable = ast_primary_plane_helper_atomic_enable,
.atomic_disable = ast_primary_plane_helper_atomic_disable,
+ .get_scanout_buffer = ast_primary_plane_helper_get_scanout_buffer,
};
static const struct drm_plane_funcs ast_primary_plane_funcs = {
@@ -1343,43 +1362,9 @@ static int ast_crtc_init(struct drm_device *dev)
* VGA Connector
*/
-static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector);
- struct drm_device *dev = connector->dev;
- struct ast_device *ast = to_ast_device(dev);
- struct edid *edid;
- int count;
-
- if (!ast_vga_connector->i2c)
- goto err_drm_connector_update_edid_property;
-
- /*
- * Protect access to I/O registers from concurrent modesetting
- * by acquiring the I/O-register lock.
- */
- mutex_lock(&ast->modeset_lock);
-
- edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter);
- if (!edid)
- goto err_mutex_unlock;
-
- mutex_unlock(&ast->modeset_lock);
-
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-
-err_mutex_unlock:
- mutex_unlock(&ast->modeset_lock);
-err_drm_connector_update_edid_property:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = {
- .get_modes = ast_vga_connector_helper_get_modes,
+ .get_modes = drm_connector_helper_get_modes,
+ .detect_ctx = drm_connector_helper_detect_from_ddc,
};
static const struct drm_connector_funcs ast_vga_connector_funcs = {
@@ -1390,23 +1375,21 @@ static const struct drm_connector_funcs ast_vga_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_vga_connector_init(struct drm_device *dev,
- struct ast_vga_connector *ast_vga_connector)
+static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector)
{
- struct drm_connector *connector = &ast_vga_connector->base;
+ struct ast_device *ast = to_ast_device(dev);
+ struct i2c_adapter *ddc;
int ret;
- ast_vga_connector->i2c = ast_i2c_create(dev);
- if (!ast_vga_connector->i2c)
- drm_err(dev, "failed to add ddc bus for connector\n");
+ ddc = ast_ddc_create(ast);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ return ret;
+ }
- if (ast_vga_connector->i2c)
- ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &ast_vga_connector->i2c->adapter);
- else
- ret = drm_connector_init(dev, connector, &ast_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, ddc);
if (ret)
return ret;
@@ -1415,7 +1398,7 @@ static int ast_vga_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
@@ -1425,8 +1408,7 @@ static int ast_vga_output_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.vga.encoder;
- struct ast_vga_connector *ast_vga_connector = &ast->output.vga.vga_connector;
- struct drm_connector *connector = &ast_vga_connector->base;
+ struct drm_connector *connector = &ast->output.vga.connector;
int ret;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
@@ -1434,7 +1416,7 @@ static int ast_vga_output_init(struct ast_device *ast)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = ast_vga_connector_init(dev, ast_vga_connector);
+ ret = ast_vga_connector_init(dev, connector);
if (ret)
return ret;
@@ -1449,43 +1431,9 @@ static int ast_vga_output_init(struct ast_device *ast)
* SIL164 Connector
*/
-static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector);
- struct drm_device *dev = connector->dev;
- struct ast_device *ast = to_ast_device(dev);
- struct edid *edid;
- int count;
-
- if (!ast_sil164_connector->i2c)
- goto err_drm_connector_update_edid_property;
-
- /*
- * Protect access to I/O registers from concurrent modesetting
- * by acquiring the I/O-register lock.
- */
- mutex_lock(&ast->modeset_lock);
-
- edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter);
- if (!edid)
- goto err_mutex_unlock;
-
- mutex_unlock(&ast->modeset_lock);
-
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-
-err_mutex_unlock:
- mutex_unlock(&ast->modeset_lock);
-err_drm_connector_update_edid_property:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = {
- .get_modes = ast_sil164_connector_helper_get_modes,
+ .get_modes = drm_connector_helper_get_modes,
+ .detect_ctx = drm_connector_helper_detect_from_ddc,
};
static const struct drm_connector_funcs ast_sil164_connector_funcs = {
@@ -1496,23 +1444,21 @@ static const struct drm_connector_funcs ast_sil164_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_sil164_connector_init(struct drm_device *dev,
- struct ast_sil164_connector *ast_sil164_connector)
+static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector)
{
- struct drm_connector *connector = &ast_sil164_connector->base;
+ struct ast_device *ast = to_ast_device(dev);
+ struct i2c_adapter *ddc;
int ret;
- ast_sil164_connector->i2c = ast_i2c_create(dev);
- if (!ast_sil164_connector->i2c)
- drm_err(dev, "failed to add ddc bus for connector\n");
+ ddc = ast_ddc_create(ast);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ return ret;
+ }
- if (ast_sil164_connector->i2c)
- ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
- DRM_MODE_CONNECTOR_DVII,
- &ast_sil164_connector->i2c->adapter);
- else
- ret = drm_connector_init(dev, connector, &ast_sil164_connector_funcs,
- DRM_MODE_CONNECTOR_DVII);
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII, ddc);
if (ret)
return ret;
@@ -1521,7 +1467,7 @@ static int ast_sil164_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
@@ -1531,8 +1477,7 @@ static int ast_sil164_output_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.sil164.encoder;
- struct ast_sil164_connector *ast_sil164_connector = &ast->output.sil164.sil164_connector;
- struct drm_connector *connector = &ast_sil164_connector->base;
+ struct drm_connector *connector = &ast->output.sil164.connector;
int ret;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1540,7 +1485,7 @@ static int ast_sil164_output_init(struct ast_device *ast)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = ast_sil164_connector_init(dev, ast_sil164_connector);
+ ret = ast_sil164_connector_init(dev, connector);
if (ret)
return ret;
@@ -1952,13 +1897,13 @@ int ast_mode_config_init(struct ast_device *ast)
ret = ast_vga_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.vga.vga_connector.base;
+ physical_connector = &ast->output.vga.connector;
}
if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
ret = ast_sil164_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.sil164.sil164_connector.base;
+ physical_connector = &ast->output.sil164.connector;
}
if (ast->tx_chip_types & AST_TX_DP501_BIT) {
ret = ast_dp501_output_init(ast);
@@ -1978,7 +1923,9 @@ int ast_mode_config_init(struct ast_device *ast)
drm_mode_config_reset(dev);
- drm_kms_helper_poll_init(dev);
+ ret = drmm_kms_helper_poll_init(dev);
+ if (ret)
+ return ret;
return 0;
}
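
[Editor's note] The ast hunks above drop the driver's hand-rolled EDID fetch in favour of the generic probe helpers, which operate on the DDC adapter registered at connector-init time. A minimal sketch of the resulting pattern, with hypothetical foo_* names (the connector funcs struct is assumed to exist elsewhere):

    #include <drm/drm_connector.h>
    #include <drm/drm_probe_helper.h>

    static const struct drm_connector_helper_funcs foo_connector_helpers = {
        .get_modes = drm_connector_helper_get_modes,        /* modes from EDID on ->ddc */
        .detect_ctx = drm_connector_helper_detect_from_ddc, /* probe the DDC bus */
    };

    static int foo_connector_init(struct drm_device *dev,
                                  struct drm_connector *connector,
                                  struct i2c_adapter *ddc)
    {
        int ret;

        ret = drm_connector_init_with_ddc(dev, connector, &foo_connector_funcs,
                                          DRM_MODE_CONNECTOR_VGA, ddc);
        if (ret)
            return ret;

        drm_connector_helper_add(connector, &foo_connector_helpers);

        /* No HPD line on VGA, so poll for both plug and unplug. */
        connector->polled = DRM_CONNECTOR_POLL_CONNECT |
                            DRM_CONNECTOR_POLL_DISCONNECT;
        return 0;
    }
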
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index efd996f6c138..30a17876ff50 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -92,13 +92,12 @@ config DRM_FSL_LDB
config DRM_ITE_IT6505
tristate "ITE IT6505 DisplayPort bridge"
+ depends on DRM_DISPLAY_DP_AUX_BUS
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HDCP_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on OF
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDCP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
- select DRM_DP_HELPER
select EXTCON
select CRYPTO
select CRYPTO_HASH
@@ -190,6 +189,13 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
to DP++. This is used with the i.MX6 imx-ldb
driver. You are likely to say N here.
+config DRM_MICROCHIP_LVDS_SERIALIZER
+ tristate "Microchip LVDS serializer support"
+ depends on OF
+ depends on DRM_ATMEL_HLCDC
+ help
+ Support for Microchip's LVDS serializer.
+
config DRM_NWL_MIPI_DSI
tristate "Northwest Logic MIPI DSI Host controller"
depends on DRM
@@ -226,10 +232,10 @@ config DRM_PARADE_PS8622
config DRM_PARADE_PS8640
tristate "Parade PS8640 MIPI DSI to eDP Converter"
+ depends on DRM_DISPLAY_DP_AUX_BUS
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on OF
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
@@ -313,9 +319,9 @@ config DRM_TOSHIBA_TC358764
config DRM_TOSHIBA_TC358767
tristate "Toshiba TC358767 eDP bridge"
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on OF
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_MIPI_DSI
@@ -336,9 +342,9 @@ config DRM_TOSHIBA_TC358768
config DRM_TOSHIBA_TC358775
tristate "Toshiba TC358775 DSI/LVDS bridge"
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on OF
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
@@ -381,15 +387,15 @@ config DRM_TI_SN65DSI83
config DRM_TI_SN65DSI86
tristate "TI SN65DSI86 DSI to eDP bridge"
+ depends on DRM_DISPLAY_DP_AUX_BUS
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on OF
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
select DRM_MIPI_DSI
select AUXILIARY_BUS
- select DRM_DP_AUX_BUS
help
Texas Instruments SN65DSI86 DSI to eDP Bridge driver
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 017b5832733b..7df87b582dca 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
+obj-$(CONFIG_DRM_MICROCHIP_LVDS_SERIALIZER) += microchip-lvds.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 39c9ece373b0..ea271f62b214 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -356,6 +356,7 @@ struct adv7511 {
enum drm_connector_status status;
bool powered;
+ struct drm_bridge *next_bridge;
struct drm_display_mode curr_mode;
unsigned int f_tmds;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index b5518ff97165..dd21b81bd28f 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -477,6 +478,11 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
if (ret < 0)
return ret;
+ /* If there is no IRQ to handle, exit indicating no IRQ data */
+ if (!(irq0 & (ADV7511_INT0_HPD | ADV7511_INT0_EDID_READY)) &&
+ !(irq1 & ADV7511_INT1_DDC_ERROR))
+ return -ENODATA;
+
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
@@ -946,6 +952,12 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge,
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret = 0;
+ if (adv->next_bridge) {
+ ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge, flags);
+ if (ret)
+ return ret;
+ }
+
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
ret = adv7511_connector_init(adv);
if (ret < 0)
@@ -1216,6 +1228,11 @@ static int adv7511_probe(struct i2c_client *i2c)
memset(&link_config, 0, sizeof(link_config));
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, NULL,
+ &adv7511->next_bridge);
+ if (ret && ret != -ENODEV)
+ return ret;
+
if (adv7511->info->link_config)
ret = adv7511_parse_dt(dev->of_node, &link_config);
else
@@ -1318,7 +1335,8 @@ static int adv7511_probe(struct i2c_client *i2c)
ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
adv7511_irq_handler,
- IRQF_ONESHOT, dev_name(dev),
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(dev),
adv7511);
if (ret)
goto err_unregister_audio;
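
[Editor's note] Since the interrupt is now requested with IRQF_SHARED, adv7511_irq_process() must be able to report "not ours"; the new -ENODATA return above feeds that. A hedged sketch of how a threaded handler typically maps this (foo_* names are stand-ins, not the driver's actual handler):

    static irqreturn_t foo_irq_handler(int irq, void *devid)
    {
        struct foo_device *foo = devid;
        int ret;

        ret = foo_irq_process(foo, true);

        /* -ENODATA: none of our status bits fired, let other sharers run. */
        if (ret == -ENODATA)
            return IRQ_NONE;

        return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
    }
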
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index 173dada218ec..5b564fded6d6 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_ANALOGIX_ANX6345
tristate "Analogix ANX6345 bridge"
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on OF
select DRM_ANALOGIX_DP
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
help
@@ -15,9 +15,9 @@ config DRM_ANALOGIX_ANX6345
config DRM_ANALOGIX_ANX78XX
tristate "Analogix ANX78XX bridge"
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
select DRM_ANALOGIX_DP
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
help
@@ -28,16 +28,16 @@ config DRM_ANALOGIX_ANX78XX
config DRM_ANALOGIX_DP
tristate
- depends on DRM
+ depends on DRM_DISPLAY_HELPER
config DRM_ANALOGIX_ANX7625
tristate "Analogix Anx7625 MIPI to DP interface support"
depends on DRM
+ depends on DRM_DISPLAY_DP_AUX_BUS
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HDCP_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on OF
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDCP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
select DRM_MIPI_DSI
help
ANX7625 is an ultra-low power 4K mobile HD transmitter
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 9d96d28d6fe8..59e9ad349969 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2066,10 +2066,8 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
};
host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
- if (!host) {
- DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "fail to find dsi host.\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
@@ -2471,15 +2469,22 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
mutex_unlock(&ctx->aux_lock);
}
+static void
+anx7625_audio_update_connector_status(struct anx7625_data *ctx,
+ enum drm_connector_status status);
+
static enum drm_connector_status
anx7625_bridge_detect(struct drm_bridge *bridge)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
+ enum drm_connector_status status;
DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
- return anx7625_sink_detect(ctx);
+ status = anx7625_sink_detect(ctx);
+ anx7625_audio_update_connector_status(ctx, status);
+ return status;
}
static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge,
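
[Editor's note] This and several later hunks convert open-coded dev_err() + -EPROBE_DEFER returns to dev_err_probe(), which stays quiet for deferrals and records the reason in /sys/kernel/debug/devices_deferred. A hedged sketch of the idiom (foo_find_resource() is hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
        struct device *dev = &pdev->dev;

        /*
         * dev_err_probe() returns the error it is given, so the call
         * can sit directly in the return statement.
         */
        if (!foo_find_resource(dev))
            return dev_err_probe(dev, -EPROBE_DEFER,
                                 "resource not ready\n");

        return 0;
    }
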
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index cced81633ddc..7817f6f56607 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -23,12 +23,12 @@ endif
config DRM_CDNS_MHDP8546
tristate "Cadence DPI/DP bridge"
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDCP_HELPER
- select DRM_DISPLAY_HELPER
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HDCP_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on OF
select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
- depends on OF
help
Support Cadence DPI to DP bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index e226acc5c15e..8a91ef0ae065 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2059,6 +2059,9 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
+ if (!mhdp_state->current_mode)
+ return;
+
drm_mode_set_name(mhdp_state->current_mode);
dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 82d23e4df09e..9eecac457dcf 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -563,10 +563,8 @@ static int chipone_dsi_host_attach(struct chipone *icn)
host = of_find_mipi_dsi_host_by_node(host_node);
of_node_put(host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dsi)) {
@@ -783,7 +781,6 @@ static struct mipi_dsi_driver chipone_dsi_driver = {
.remove = chipone_dsi_remove,
.driver = {
.name = "chipone-icn6211",
- .owner = THIS_MODULE,
.of_match_table = chipone_of_match,
},
};
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
index 5965e8027529..13142a6b8590 100644
--- a/drivers/gpu/drm/bridge/imx/Kconfig
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -5,11 +5,11 @@ config DRM_IMX_LDB_HELPER
config DRM_IMX8MP_DW_HDMI_BRIDGE
tristate "Freescale i.MX8MP HDMI-TX bridge support"
- depends on OF
depends on COMMON_CLK
- select DRM_DW_HDMI
- select DRM_IMX8MP_HDMI_PVI
- select PHY_FSL_SAMSUNG_HDMI_PHY
+ depends on DRM_DW_HDMI
+ depends on OF
+ imply DRM_IMX8MP_HDMI_PVI
+ imply PHY_FSL_SAMSUNG_HDMI_PHY
help
Choose this to enable support for the internal HDMI encoder found
on the i.MX8MP SoC.
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index f2a09c879e3d..073e64dc200c 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -173,15 +173,13 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
return 0;
}
-static int imx8mp_hdmi_pvi_remove(struct platform_device *pdev)
+static void imx8mp_hdmi_pvi_remove(struct platform_device *pdev)
{
struct imx8mp_hdmi_pvi *pvi = platform_get_drvdata(pdev);
drm_bridge_remove(&pvi->bridge);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id imx8mp_hdmi_pvi_match[] = {
@@ -195,7 +193,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pvi_match);
static struct platform_driver imx8mp_hdmi_pvi_driver = {
.probe = imx8mp_hdmi_pvi_probe,
- .remove = imx8mp_hdmi_pvi_remove,
+ .remove_new = imx8mp_hdmi_pvi_remove,
.driver = {
.name = "imx-hdmi-pvi",
.of_match_table = imx8mp_hdmi_pvi_match,
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
index 89fc432ac611..13bc570c5473 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
@@ -104,13 +104,11 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int imx8mp_dw_hdmi_remove(struct platform_device *pdev)
+static void imx8mp_dw_hdmi_remove(struct platform_device *pdev)
{
struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev);
dw_hdmi_remove(hdmi->dw_hdmi);
-
- return 0;
}
static int __maybe_unused imx8mp_dw_hdmi_pm_suspend(struct device *dev)
@@ -140,7 +138,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_dw_hdmi_of_table);
static struct platform_driver imx8mp_dw_hdmi_platform_driver = {
.probe = imx8mp_dw_hdmi_probe,
- .remove = imx8mp_dw_hdmi_remove,
+ .remove_new = imx8mp_dw_hdmi_remove,
.driver = {
.name = "imx8mp-dw-hdmi-tx",
.of_match_table = imx8mp_dw_hdmi_of_table,
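
[Editor's note] Both i.MX8MP bridges switch their platform .remove callback to the void-returning .remove_new variant; the int return was ignored by the driver core, so nothing useful could be done with it. A hedged sketch of the converted shape (foo_* names assumed):

    static void foo_remove(struct platform_device *pdev)
    {
        struct foo *priv = platform_get_drvdata(pdev);

        drm_bridge_remove(&priv->bridge);
        /* No return value: remove must not fail. */
    }

    static struct platform_driver foo_driver = {
        .probe = foo_probe,
        .remove_new = foo_remove,
        .driver = {
            .name = "foo",
        },
    };
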
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 27334173e911..3f68c82888c2 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -3,6 +3,7 @@
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 1c3433b5e366..925e42f46cd8 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -1540,12 +1540,6 @@ static int it66121_probe(struct i2c_client *client)
return -EINVAL;
}
- if (!of_device_is_available(ep)) {
- of_node_put(ep);
- dev_err(ctx->dev, "The remote device is disabled\n");
- return -ENODEV;
- }
-
ctx->next_bridge = of_drm_find_bridge(ep);
of_node_put(ep);
if (!ctx->next_bridge) {
@@ -1586,13 +1580,18 @@ static int it66121_probe(struct i2c_client *client)
ctx->bridge.funcs = &it66121_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
-
- ret = devm_request_threaded_irq(dev, client->irq, NULL, it66121_irq_threaded_handler,
- IRQF_ONESHOT, dev_name(dev), ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
- return ret;
+ ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ if (client->irq > 0) {
+ ctx->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ it66121_irq_threaded_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
+ return ret;
+ }
}
it66121_audio_codec_init(ctx, dev);
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 4b2ae27f0a57..1a9defa15663 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -494,10 +494,8 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
};
host = of_find_mipi_dsi_host_by_node(lt->host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index a9c7e2b07ea1..b99fe87ec738 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -761,10 +761,8 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
int ret;
host = of_find_mipi_dsi_host_by_node(dsi_node);
- if (!host) {
- dev_err(lt9611->dev, "failed to find dsi host\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
+ if (!host)
+ return ERR_PTR(dev_err_probe(lt9611->dev, -EPROBE_DEFER, "failed to find dsi host\n"));
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index f4f593ad8f79..ab702471f3ab 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -266,10 +266,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
int ret;
host = of_find_mipi_dsi_host_by_node(dsi_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
+ if (!host)
+ return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"));
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
new file mode 100644
index 000000000000..b8313dad6072
--- /dev/null
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Manikandan Muralidharan <manikandan.m@microchip.com>
+ * Author: Dharma Balasubiramani <dharma.b@microchip.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#define LVDS_POLL_TIMEOUT_MS 1000
+
+/* LVDSC register offsets */
+#define LVDSC_CR 0x00
+#define LVDSC_CFGR 0x04
+#define LVDSC_SR 0x0C
+#define LVDSC_WPMR 0xE4
+
+/* Bitfields in LVDSC_CR (Control Register) */
+#define LVDSC_CR_SER_EN BIT(0)
+
+/* Bitfields in LVDSC_CFGR (Configuration Register) */
+#define LVDSC_CFGR_PIXSIZE_24BITS 0
+#define LVDSC_CFGR_DEN_POL_HIGH 0
+#define LVDSC_CFGR_DC_UNBALANCED 0
+#define LVDSC_CFGR_MAPPING_JEIDA BIT(6)
+
+/* Bitfields in LVDSC_SR (Status Register) */
+#define LVDSC_SR_CS BIT(0)
+
+/* Bitfields in LVDSC_WPMR (Write Protection Mode Register) */
+#define LVDSC_WPMR_WPKEY_MASK GENMASK(31, 8)
+#define LVDSC_WPMR_WPKEY_PSSWD 0x4C5644
+
+struct mchp_lvds {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *pclk;
+ struct drm_panel *panel;
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+};
+
+static inline struct mchp_lvds *bridge_to_lvds(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct mchp_lvds, bridge);
+}
+
+static inline u32 lvds_readl(struct mchp_lvds *lvds, u32 offset)
+{
+ return readl_relaxed(lvds->regs + offset);
+}
+
+static inline void lvds_writel(struct mchp_lvds *lvds, u32 offset, u32 val)
+{
+ writel_relaxed(val, lvds->regs + offset);
+}
+
+static void lvds_serialiser_on(struct mchp_lvds *lvds)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(LVDS_POLL_TIMEOUT_MS);
+
+ /* The LVDSC registers can only be written if WPEN is cleared */
+ lvds_writel(lvds, LVDSC_WPMR, (LVDSC_WPMR_WPKEY_PSSWD &
+ LVDSC_WPMR_WPKEY_MASK));
+
+	/* Wait until the configuration status bit (LVDSC_SR_CS) clears */
+ while (lvds_readl(lvds, LVDSC_SR) & LVDSC_SR_CS) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(lvds->dev, "%s: timeout error\n", __func__);
+ return;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ /* Configure the LVDSC */
+ lvds_writel(lvds, LVDSC_CFGR, (LVDSC_CFGR_MAPPING_JEIDA |
+ LVDSC_CFGR_DC_UNBALANCED |
+ LVDSC_CFGR_DEN_POL_HIGH |
+ LVDSC_CFGR_PIXSIZE_24BITS));
+
+ /* Enable the LVDS serializer */
+ lvds_writel(lvds, LVDSC_CR, LVDSC_CR_SER_EN);
+}
+
+static int mchp_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct mchp_lvds *lvds = bridge_to_lvds(bridge);
+
+ return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
+ bridge, flags);
+}
+
+static void mchp_lvds_enable(struct drm_bridge *bridge)
+{
+ struct mchp_lvds *lvds = bridge_to_lvds(bridge);
+ int ret;
+
+ ret = clk_prepare_enable(lvds->pclk);
+ if (ret < 0) {
+ dev_err(lvds->dev, "failed to enable lvds pclk %d\n", ret);
+ return;
+ }
+
+ ret = pm_runtime_get_sync(lvds->dev);
+ if (ret < 0) {
+ dev_err(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ return;
+ }
+
+ lvds_serialiser_on(lvds);
+}
+
+static void mchp_lvds_disable(struct drm_bridge *bridge)
+{
+ struct mchp_lvds *lvds = bridge_to_lvds(bridge);
+
+ pm_runtime_put(lvds->dev);
+ clk_disable_unprepare(lvds->pclk);
+}
+
+static const struct drm_bridge_funcs mchp_lvds_bridge_funcs = {
+ .attach = mchp_lvds_attach,
+ .enable = mchp_lvds_enable,
+ .disable = mchp_lvds_disable,
+};
+
+static int mchp_lvds_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mchp_lvds *lvds;
+ struct device_node *port;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+ if (!lvds)
+ return -ENOMEM;
+
+ lvds->dev = dev;
+
+ lvds->regs = devm_ioremap_resource(lvds->dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(lvds->regs))
+ return PTR_ERR(lvds->regs);
+
+ lvds->pclk = devm_clk_get(lvds->dev, "pclk");
+ if (IS_ERR(lvds->pclk))
+ return dev_err_probe(lvds->dev, PTR_ERR(lvds->pclk),
+ "could not get pclk_lvds\n");
+
+ port = of_graph_get_remote_node(dev->of_node, 1, 0);
+ if (!port) {
+		dev_err(dev,
+			"can't find remote panel port; check the LVDS output port in DT\n");
+ return -ENODEV;
+ }
+
+ lvds->panel = of_drm_find_panel(port);
+ of_node_put(port);
+
+ if (IS_ERR(lvds->panel))
+ return -EPROBE_DEFER;
+
+ lvds->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+
+ if (IS_ERR(lvds->panel_bridge))
+ return PTR_ERR(lvds->panel_bridge);
+
+ lvds->bridge.of_node = dev->of_node;
+ lvds->bridge.type = DRM_MODE_CONNECTOR_LVDS;
+ lvds->bridge.funcs = &mchp_lvds_bridge_funcs;
+
+ dev_set_drvdata(dev, lvds);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ dev_err(lvds->dev, "failed to enable pm runtime: %d\n", ret);
+ return ret;
+ }
+
+ drm_bridge_add(&lvds->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id mchp_lvds_dt_ids[] = {
+ {
+ .compatible = "microchip,sam9x75-lvds",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mchp_lvds_dt_ids);
+
+static struct platform_driver mchp_lvds_driver = {
+ .probe = mchp_lvds_probe,
+ .driver = {
+ .name = "microchip-lvds",
+ .of_match_table = mchp_lvds_dt_ids,
+ },
+};
+module_platform_driver(mchp_lvds_driver);
+
+MODULE_AUTHOR("Manikandan Muralidharan <manikandan.m@microchip.com>");
+MODULE_AUTHOR("Dharma Balasubiramani <dharma.b@microchip.com>");
+MODULE_DESCRIPTION("Low Voltage Differential Signaling Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 7f41525f7a6e..32506524d9a2 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -4,6 +4,8 @@
* Copyright (C) 2017 Broadcom
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 15fc182d05ef..1252fd30d4a4 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_DW_HDMI
- tristate
- select DRM_DISPLAY_HDMI_HELPER
- select DRM_DISPLAY_HELPER
+ tristate "Synopsys Designware HDMI TX Controller"
+	tristate "Synopsys DesignWare HDMI TX Controller"
+ depends on DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_MMIO
select CEC_CORE if CEC_NOTIFIER
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index cceb5aab6c83..9f2bc932c371 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3291,40 +3291,17 @@ static void dw_hdmi_init_hw(struct dw_hdmi *hdmi)
static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi)
{
- struct device_node *endpoint;
struct device_node *remote;
if (!hdmi->plat_data->output_port)
return 0;
- endpoint = of_graph_get_endpoint_by_regs(hdmi->dev->of_node,
- hdmi->plat_data->output_port,
- -1);
- if (!endpoint) {
- /*
- * On platforms whose bindings don't make the output port
- * mandatory (such as Rockchip) the plat_data->output_port
- * field isn't set, so it's safe to make this a fatal error.
- */
- dev_err(hdmi->dev, "Missing endpoint in port@%u\n",
- hdmi->plat_data->output_port);
- return -ENODEV;
- }
- remote = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!remote) {
- dev_err(hdmi->dev, "Endpoint in port@%u unconnected\n",
- hdmi->plat_data->output_port);
+ remote = of_graph_get_remote_node(hdmi->dev->of_node,
+ hdmi->plat_data->output_port,
+ -1);
+ if (!remote)
return -ENODEV;
- }
-
- if (!of_device_is_available(remote)) {
- dev_err(hdmi->dev, "port@%u remote device is disabled\n",
- hdmi->plat_data->output_port);
- of_node_put(remote);
- return -ENODEV;
- }
hdmi->next_bridge = of_drm_find_bridge(remote);
of_node_put(remote);
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index deccb3995022..3d3d135b4348 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -401,7 +401,6 @@ static struct mipi_dsi_driver tc358764_driver = {
.remove = tc358764_remove,
.driver = {
.name = "tc358764",
- .owner = THIS_MODULE,
.of_match_table = tc358764_of_match,
},
};
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 90a89d70d832..3b7cc3be2ccd 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -107,6 +108,7 @@
#define RDPKTLN 0x0404 /* Command Read Packet Length */
#define VPCTRL 0x0450 /* Video Path Control */
+#define EVTMODE BIT(5) /* Video event mode enable, tc35876x only */
#define HTIM1 0x0454 /* Horizontal Timing Control 1 */
#define HTIM2 0x0458 /* Horizontal Timing Control 2 */
#define VTIM1 0x045C /* Vertical Timing Control 1 */
@@ -254,6 +256,11 @@ enum tc358775_ports {
TC358775_LVDS_OUT1,
};
+enum tc3587x5_type {
+ TC358765 = 0x65,
+ TC358775 = 0x75,
+};
+
struct tc_data {
struct i2c_client *i2c;
struct device *dev;
@@ -271,6 +278,8 @@ struct tc_data {
struct gpio_desc *stby_gpio;
u8 lvds_link; /* single-link or dual-link */
u8 bpc;
+
+ enum tc3587x5_type type;
};
static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
@@ -424,10 +433,16 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
d2l_write(tc->i2c, PPI_STARTPPI, PPI_START_FUNCTION);
d2l_write(tc->i2c, DSI_STARTDSI, DSI_RX_START);
+ /* Video event mode vs pulse mode bit, does not exist for tc358775 */
+ if (tc->type == TC358765)
+ val = EVTMODE;
+ else
+ val = 0;
+
if (tc->bpc == 8)
- val = TC358775_VPCTRL_OPXLFMT(1);
+ val |= TC358775_VPCTRL_OPXLFMT(1);
else /* bpc = 6; */
- val = TC358775_VPCTRL_MSF(1);
+ val |= TC358775_VPCTRL_MSF(1);
dsiclk = mode->crtc_clock * 3 * tc->bpc / tc->num_dsi_lanes / 1000;
clkdiv = dsiclk / (tc->lvds_link == DUAL_LINK ? DIVIDE_BY_6 : DIVIDE_BY_3);
@@ -454,10 +469,6 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
dev_dbg(tc->dev, "bus_formats %04x bpc %d\n",
connector->display_info.bus_formats[0],
tc->bpc);
- /*
- * Default hardware register settings of tc358775 configured
- * with MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA jeida-24 format
- */
if (connector->display_info.bus_formats[0] ==
MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) {
/* VESA-24 */
@@ -468,14 +479,15 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
- } else { /* MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */
- d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
- d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0));
- d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0));
- d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
- d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2));
- d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
- d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0));
+ } else {
+ /* JEIDA-18 and JEIDA-24 */
+ d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R2, LVI_R3, LVI_R4, LVI_R5));
+ d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R6, LVI_R1, LVI_R7, LVI_G2));
+ d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G3, LVI_G4, LVI_G0, LVI_G1));
+ d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G5, LVI_G6, LVI_G7, LVI_B2));
+ d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B0, LVI_B1, LVI_B3, LVI_B4));
+ d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B5, LVI_B6, LVI_B7, LVI_L0));
+ d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R0));
}
d2l_write(tc->i2c, VFUEN, VFUEN_EN);
@@ -528,27 +540,24 @@ tc_mode_valid(struct drm_bridge *bridge,
static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
{
struct device_node *endpoint;
- struct device_node *parent;
struct device_node *remote;
int dsi_lanes = -1;
- /*
- * To get the data-lanes of dsi, we need to access the dsi0_out of port1
- * of dsi0 endpoint from bridge port0 of d2l_in
- */
endpoint = of_graph_get_endpoint_by_regs(tc->dev->of_node,
TC358775_DSI_IN, -1);
- if (endpoint) {
- /* dsi0_out node */
- parent = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (parent) {
- /* dsi0 port 1 */
- dsi_lanes = drm_of_get_data_lanes_count_ep(parent, 1, -1, 1, 4);
- of_node_put(parent);
- }
+ dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4);
+
+ /* Quirk old dtb: Use data lanes from the DSI host side instead of bridge */
+ if (dsi_lanes == -EINVAL || dsi_lanes == -ENODEV) {
+ remote = of_graph_get_remote_endpoint(endpoint);
+ dsi_lanes = drm_of_get_data_lanes_count(remote, 1, 4);
+ of_node_put(remote);
+ if (dsi_lanes >= 1)
+ dev_warn(tc->dev, "no dsi-lanes for the bridge, using host lanes\n");
}
+ of_node_put(endpoint);
+
if (dsi_lanes < 0)
return dsi_lanes;
@@ -610,10 +619,8 @@ static int tc_attach_host(struct tc_data *tc)
};
host = of_find_mipi_dsi_host_by_node(tc->host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
@@ -625,7 +632,21 @@ static int tc_attach_host(struct tc_data *tc)
dsi->lanes = tc->num_dsi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM;
+
+ /*
+ * The hs_rate and lp_rate are data rate values. The HS mode is
+ * differential, while the LP mode is single ended. As the HS mode
+ * uses DDR, the DSI clock frequency is half the hs_rate. The 10 Mbs
+ * data rate for LP mode is not specified in the bridge data sheet,
+ * but seems to be part of the MIPI DSI spec.
+ */
+ if (tc->type == TC358765)
+ dsi->hs_rate = 800000000;
+ else
+ dsi->hs_rate = 1000000000;
+ dsi->lp_rate = 10000000;
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
@@ -648,6 +669,7 @@ static int tc_probe(struct i2c_client *client)
tc->dev = dev;
tc->i2c = client;
+ tc->type = (enum tc3587x5_type)(unsigned long)of_device_get_match_data(dev);
tc->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node,
TC358775_LVDS_OUT0, 0);
@@ -672,12 +694,9 @@ static int tc_probe(struct i2c_client *client)
return ret;
}
- tc->stby_gpio = devm_gpiod_get(dev, "stby", GPIOD_OUT_HIGH);
- if (IS_ERR(tc->stby_gpio)) {
- ret = PTR_ERR(tc->stby_gpio);
- dev_err(dev, "cannot get stby-gpio %d\n", ret);
- return ret;
- }
+ tc->stby_gpio = devm_gpiod_get_optional(dev, "stby", GPIOD_OUT_HIGH);
+ if (IS_ERR(tc->stby_gpio))
+ return PTR_ERR(tc->stby_gpio);
tc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(tc->reset_gpio)) {
@@ -688,6 +707,7 @@ static int tc_probe(struct i2c_client *client)
tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
+ tc->bridge.pre_enable_prev_first = true;
drm_bridge_add(&tc->bridge);
i2c_set_clientdata(client, tc);
@@ -711,13 +731,15 @@ static void tc_remove(struct i2c_client *client)
}
static const struct i2c_device_id tc358775_i2c_ids[] = {
- { "tc358775", 0 },
+ { "tc358765", TC358765, },
+ { "tc358775", TC358775, },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358775_i2c_ids);
static const struct of_device_id tc358775_of_ids[] = {
- { .compatible = "toshiba,tc358775", },
+ { .compatible = "toshiba,tc358765", .data = (void *)TC358765, },
+ { .compatible = "toshiba,tc358775", .data = (void *)TC358775, },
{ }
};
MODULE_DEVICE_TABLE(of, tc358775_of_ids);
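
[Editor's note] A worked example of the clock arithmetic in tc_bridge_enable() may help: with a 148.5 MHz pixel clock (crtc_clock = 148500 kHz), 8 bpc and 4 DSI lanes, dsiclk = 148500 * 3 * 8 / 4 / 1000 = 891. And because HS mode is DDR, the 1 Gbps hs_rate set above corresponds to a 500 MHz DSI clock. As a sketch (values are illustrative, not from the datasheet):

    unsigned int crtc_clock = 148500;   /* kHz */
    unsigned int bpc = 8, lanes = 4;
    unsigned int dsiclk = crtc_clock * 3 * bpc / lanes / 1000;  /* = 891 */
    unsigned int dsi_clk_hz = 1000000000 / 2;   /* hs_rate / 2, DDR */
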
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index d4c1a601bbb5..674efc489e3a 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -123,26 +123,11 @@ static int thc63_parse_dt(struct thc63_dev *thc63)
struct device_node *endpoint;
struct device_node *remote;
- endpoint = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
- THC63_RGB_OUT0, -1);
- if (!endpoint) {
- dev_err(thc63->dev, "Missing endpoint in port@%u\n",
- THC63_RGB_OUT0);
- return -ENODEV;
- }
-
- remote = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
+ remote = of_graph_get_remote_node(thc63->dev->of_node,
+ THC63_RGB_OUT0, -1);
if (!remote) {
- dev_err(thc63->dev, "Endpoint in port@%u unconnected\n",
- THC63_RGB_OUT0);
- return -ENODEV;
- }
-
- if (!of_device_is_available(remote)) {
- dev_err(thc63->dev, "port@%u remote endpoint is disabled\n",
+ dev_err(thc63->dev, "No remote endpoint for port@%u\n",
THC63_RGB_OUT0);
- of_node_put(remote);
return -ENODEV;
}
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index ca3348109bcd..6b559e071301 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -319,12 +319,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
.channel = 0,
.node = NULL,
};
+ int ret;
host = of_find_mipi_dsi_host_by_node(dlpc->host_node);
- if (!host) {
- DRM_DEV_ERROR(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dlpc->dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dlpc->dsi)) {
@@ -336,7 +335,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
dlpc->dsi->format = MIPI_DSI_FMT_RGB565;
dlpc->dsi->lanes = dlpc->dsi_lanes;
- return devm_mipi_dsi_attach(dev, dlpc->dsi);
+ ret = devm_mipi_dsi_attach(dev, dlpc->dsi);
+ if (ret)
+ DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
+
+ return ret;
}
static int dlpc3433_probe(struct i2c_client *client)
@@ -367,10 +370,8 @@ static int dlpc3433_probe(struct i2c_client *client)
drm_bridge_add(&dlpc->bridge);
ret = dlpc_host_attach(dlpc);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
+ if (ret)
goto err_remove_bridge;
- }
return 0;
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 0857773e5c5f..8bc63912fddb 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -252,11 +252,11 @@ i915:cml:
i915:tgl:
extends:
- .i915
- parallel: 8
+ parallel: 5
variables:
- DEVICE_TYPE: asus-cx9400-volteer
+ DEVICE_TYPE: acer-cp514-2h-1130g7-volteer
GPU_VERSION: tgl
- RUNNER_TAG: mesa-ci-x86-64-lava-asus-cx9400-volteer
+ RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer
.amdgpu:
extends:
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index c0f56888c328..a38962a556c2 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -1,31 +1,58 @@
# SPDX-License-Identifier: MIT
-config DRM_DP_AUX_BUS
- tristate
+config DRM_DISPLAY_HELPER
+ tristate "DRM Display Helpers"
+ depends on DRM
+ default y
+ help
+ DRM helpers for display adapters.
+
+config DRM_DISPLAY_DP_AUX_BUS
+ tristate "DRM DisplayPort AUX bus support"
depends on DRM
depends on OF || COMPILE_TEST
+ default y
-config DRM_DISPLAY_HELPER
- tristate
+config DRM_DISPLAY_DP_AUX_CEC
+ bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
depends on DRM
+ depends on DRM_DISPLAY_HELPER
+ depends on DRM_DISPLAY_DP_HELPER
+ select CEC_CORE
help
- DRM helpers for display adapters.
+ Choose this option if you want to enable HDMI CEC support for
+ DisplayPort/USB-C to HDMI adapters.
+
+ Note: not all adapters support this feature, and even for those
+ that do support this they often do not hook up the CEC pin.
+
+config DRM_DISPLAY_DP_AUX_CHARDEV
+ bool "DRM DisplayPort AUX Interface"
+ depends on DRM
+ depends on DRM_DISPLAY_HELPER
+ depends on DRM_DISPLAY_DP_HELPER
+ help
+	  Choose this option to enable a /dev/drm_dp_auxN node that allows you to
+ read and write values to arbitrary DPCD registers on the DP aux
+ channel.
config DRM_DISPLAY_DP_HELPER
- bool
+ bool "DRM DisplayPort Helpers"
depends on DRM_DISPLAY_HELPER
+ select DRM_KMS_HELPER
+ default y
help
DRM display helpers for DisplayPort.
config DRM_DISPLAY_DP_TUNNEL
- bool
- select DRM_DISPLAY_DP_HELPER
+ bool "DRM DisplayPort tunnels support"
+ depends on DRM_DISPLAY_DP_HELPER
help
Enable support for DisplayPort tunnels. This allows drivers to use
DP tunnel features like the Bandwidth Allocation mode to maximize the
BW utilization for display streams on Thunderbolt links.
-config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
bool "Enable debugging the DP tunnel state"
depends on REF_TRACKER
depends on DRM_DISPLAY_DP_TUNNEL
@@ -39,34 +66,15 @@ config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
If in doubt, say "N".
config DRM_DISPLAY_HDCP_HELPER
- bool
+	bool "DRM HDCP Helpers"
depends on DRM_DISPLAY_HELPER
+ default y
help
DRM display helpers for HDCP.
config DRM_DISPLAY_HDMI_HELPER
- bool
+ bool "DRM HDMI Helpers"
depends on DRM_DISPLAY_HELPER
+ default y
help
DRM display helpers for HDMI.
-
-config DRM_DP_AUX_CHARDEV
- bool "DRM DP AUX Interface"
- depends on DRM && DRM_DISPLAY_HELPER
- select DRM_DISPLAY_DP_HELPER
- help
- Choose this option to enable a /dev/drm_dp_auxN node that allows to
- read and write values to arbitrary DPCD registers on the DP aux
- channel.
-
-config DRM_DP_CEC
- bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
- depends on DRM && DRM_DISPLAY_HELPER
- select DRM_DISPLAY_DP_HELPER
- select CEC_CORE
- help
- Choose this option if you want to enable HDMI CEC support for
- DisplayPort/USB-C to HDMI adapters.
-
- Note: not all adapters support this feature, and even for those
- that do support this they often do not hook up the CEC pin.
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 7ca61333c669..17d2cc73ff56 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: MIT
-obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
+obj-$(CONFIG_DRM_DISPLAY_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_display_helper-y := drm_display_helper_mod.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
@@ -14,7 +14,7 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
-drm_display_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
-drm_display_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_DISPLAY_HELPER) += drm_display_helper.o
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index bd61e20770a5..14a2a8473682 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -52,7 +52,7 @@
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for return data
- * @size: sizo of the buffer
+ * @size: size of the buffer
*
* Reads @size bytes from the DP dual mode adaptor registers
* starting at @offset.
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_read);
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for write data
- * @size: sizo of the buffer
+ * @size: size of the buffer
*
* Writes @size bytes to the DP dual mode adaptor registers
* starting at @offset.
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 266826eac4a7..79a615667aab 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2113,7 +2113,7 @@ EXPORT_SYMBOL(drm_dp_aux_init);
* drm_dp_aux_register() in &drm_connector_funcs.late_register, and likewise to
* call drm_dp_aux_unregister() in &drm_connector_funcs.early_unregister.
* Functions which don't follow this will likely Oops when
- * %CONFIG_DRM_DP_AUX_CHARDEV is enabled.
+ * %CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV is enabled.
*
* For devices where the AUX channel is a device that exists independently of
* the &drm_device that uses it, such as SoCs and bridge devices, it is
@@ -2281,6 +2281,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
/* Synaptics DP1.4 MST hubs require DSC for some modes on which it applies HBLANK expansion. */
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
+ /* MediaTek panels (at least in U3224KBA) require DSC for modes with a short HBLANK on UHBR links. */
+ { OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
@@ -2948,6 +2950,43 @@ void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc)
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
+void drm_dp_as_sdp_log(struct drm_printer *p, const struct drm_dp_as_sdp *as_sdp)
+{
+ drm_printf(p, "DP SDP: AS_SDP, revision %u, length %u\n",
+ as_sdp->revision, as_sdp->length);
+ drm_printf(p, " vtotal: %d\n", as_sdp->vtotal);
+ drm_printf(p, " target_rr: %d\n", as_sdp->target_rr);
+ drm_printf(p, " duration_incr_ms: %d\n", as_sdp->duration_incr_ms);
+ drm_printf(p, " duration_decr_ms: %d\n", as_sdp->duration_decr_ms);
+ drm_printf(p, " operation_mode: %d\n", as_sdp->mode);
+}
+EXPORT_SYMBOL(drm_dp_as_sdp_log);
+
+/**
+ * drm_dp_as_sdp_supported() - check if adaptive sync sdp is supported
+ * @aux: DisplayPort AUX channel
+ * @dpcd: DisplayPort configuration data
+ *
+ * Returns true if adaptive sync sdp is supported, else returns false
+ */
+bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 rx_feature;
+
+ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
+ return false;
+
+ if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &rx_feature) != 1) {
+ drm_dbg_dp(aux->drm_dev,
+ "Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
+ return false;
+ }
+
+ return (rx_feature & DP_ADAPTIVE_SYNC_SDP_SUPPORTED);
+}
+EXPORT_SYMBOL(drm_dp_as_sdp_supported);
+
/**
* drm_dp_vsc_sdp_supported() - check if vsc sdp is supported
* @aux: DisplayPort AUX channel
@@ -4111,6 +4150,13 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
u32 overhead = 1000000;
int symbol_cycles;
+ if (lane_count == 0 || hactive == 0 || bpp_x16 == 0) {
+ DRM_DEBUG_KMS("Invalid BW overhead params: lane_count %d, hactive %d, bpp_x16 %d.%04d\n",
+ lane_count, hactive,
+ bpp_x16 >> 4, (bpp_x16 & 0xf) * 625);
+ return 0;
+ }
+
/*
* DP Standard v2.1 2.6.4.1
* SSC downspread and ref clock variation margin:
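
[Editor's note] A hedged sketch of how a display driver might consume the new Adaptive-Sync helper during sink probing (the foo_ wrapper and its call site are assumptions):

    static void foo_probe_adaptive_sync(struct drm_dp_aux *aux,
                                        const u8 dpcd[DP_RECEIVER_CAP_SIZE])
    {
        /*
         * Requires DPCD 1.3+ plus the AS SDP bit in the extended
         * feature enumeration list; both checks live in the helper.
         */
        if (drm_dp_as_sdp_supported(aux, dpcd))
            drm_dbg_dp(aux->drm_dev,
                       "sink supports Adaptive Sync SDP\n");
    }
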
diff --git a/drivers/gpu/drm/display/drm_dp_helper_internal.h b/drivers/gpu/drm/display/drm_dp_helper_internal.h
index 8917fc3af9ec..737949a2820f 100644
--- a/drivers/gpu/drm/display/drm_dp_helper_internal.h
+++ b/drivers/gpu/drm/display/drm_dp_helper_internal.h
@@ -5,7 +5,7 @@
struct drm_dp_aux;
-#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
void drm_dp_aux_dev_exit(void);
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 03d528209426..3577786b5db2 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -2274,7 +2274,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
if (port->pdt != DP_PEER_DEVICE_NONE &&
drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0)
+ drm_dp_mst_port_is_logical(port))
port->cached_edid = drm_edid_read_ddc(port->connector,
&port->aux.ddc);
@@ -3608,24 +3608,30 @@ fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
/**
- * drm_dp_read_mst_cap() - check whether or not a sink supports MST
+ * drm_dp_read_mst_cap() - Read the sink's MST mode capability
* @aux: The DP AUX channel to use
* @dpcd: A cached copy of the DPCD capabilities for this sink
*
- * Returns: %True if the sink supports MST, %false otherwise
+ * Returns: enum drm_dp_mst_mode to indicate MST mode capability
*/
-bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
- const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
u8 mstm_cap;
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
- return false;
+ return DRM_DP_SST;
if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
- return false;
+ return DRM_DP_SST;
+
+ if (mstm_cap & DP_MST_CAP)
+ return DRM_DP_MST;
- return mstm_cap & DP_MST_CAP;
+ if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG)
+ return DRM_DP_SST_SIDEBAND_MSG;
+
+ return DRM_DP_SST;
}
EXPORT_SYMBOL(drm_dp_read_mst_cap);
@@ -4213,7 +4219,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
case DP_PEER_DEVICE_SST_SINK:
ret = connector_status_connected;
/* for logical ports - cache the EDID */
- if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
+ if (drm_dp_mst_port_is_logical(port) && !port->cached_edid)
port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
@@ -5977,7 +5983,7 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
return false;
/* Virtual DP Sink (Internal Display Panel) */
- if (port->port_num >= 8)
+ if (drm_dp_mst_port_is_logical(port))
return true;
/* DP-to-HDMI Protocol Converter */
@@ -6005,6 +6011,22 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
}
/**
+ * drm_dp_mst_aux_for_parent() - Get the AUX device for an MST port's parent
+ * @port: MST port whose parent's AUX device is returned
+ *
+ * Return the AUX device for @port's parent or NULL if port's parent is the
+ * root port.
+ */
+struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port)
+{
+ if (!port->parent || !port->parent->port_parent)
+ return NULL;
+
+ return &port->parent->port_parent->aux;
+}
+EXPORT_SYMBOL(drm_dp_mst_aux_for_parent);
+
+/**
* drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
* @port: The port to check. A leaf of the MST tree with an attached display.
*
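
[Editor's note] Callers of drm_dp_read_mst_cap() now get a tri-state answer instead of a bool. A hedged sketch of the switch a driver would grow (foo_use_mst() is hypothetical):

    static bool foo_use_mst(struct drm_dp_aux *aux,
                            const u8 dpcd[DP_RECEIVER_CAP_SIZE])
    {
        switch (drm_dp_read_mst_cap(aux, dpcd)) {
        case DRM_DP_MST:
            return true;    /* full multi-stream transport */
        case DRM_DP_SST_SIDEBAND_MSG:
            /*
             * SST-only sink that still talks sideband messages;
             * keep it in SST mode unless the driver handles this.
             */
            fallthrough;
        case DRM_DP_SST:
        default:
            return false;
        }
    }
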
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h
index a785ccbfdd73..f41c34e26be2 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h
@@ -10,7 +10,9 @@
#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
#define _DRM_DP_MST_HELPER_INTERNAL_H_
-#include <drm/display/drm_dp_mst_helper.h>
+struct drm_dp_sideband_msg_req_body;
+struct drm_dp_sideband_msg_tx;
+struct drm_printer;
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 120e0de674c1..48b2df120086 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -191,7 +191,7 @@ struct drm_dp_tunnel_mgr {
struct drm_dp_tunnel_group *groups;
wait_queue_head_t bw_req_queue;
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
struct ref_tracker_dir ref_tracker;
#endif
};
@@ -385,7 +385,7 @@ static void tunnel_put(struct drm_dp_tunnel *tunnel)
kref_put(&tunnel->kref, free_tunnel);
}
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
struct ref_tracker **tracker)
{
@@ -436,8 +436,8 @@ EXPORT_SYMBOL(drm_dp_tunnel_get);
/**
* drm_dp_tunnel_put - Put a reference for a DP tunnel
- * @tunnel - Tunnel object
- * @tracker - Debug tracker for the reference
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
*
* Put a reference for @tunnel along with its debug *@tracker, which
* was obtained with drm_dp_tunnel_get().
@@ -1170,7 +1170,7 @@ int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);
/**
- * drm_dp_tunnel_atomic_get_allocated_bw - Get the BW allocated for a DP tunnel
+ * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
* @tunnel: Tunnel object
*
* Get the current BW allocated for @tunnel. After the tunnel is created /
@@ -1603,7 +1603,7 @@ static void cleanup_group(struct drm_dp_tunnel_group *group)
drm_atomic_private_obj_fini(&group->base);
}
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
{
const struct drm_dp_tunnel_state *tunnel_state;
@@ -1881,7 +1881,7 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels));
}
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
ref_tracker_dir_exit(&mgr->ref_tracker);
#endif
@@ -1892,6 +1892,7 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
/**
* drm_dp_tunnel_mgr_create - Create a DP tunnel manager
* @dev: DRM device object
+ * @max_group_count: Maximum number of tunnel groups
*
* Creates a DP tunnel manager for @dev.
*
@@ -1918,7 +1919,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
return NULL;
}
-#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
#endif
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 39ef0a6addeb..fb97b51b38f1 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -38,6 +38,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
@@ -3016,6 +3017,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
bool stall)
{
int i, ret;
+ unsigned long flags;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
struct drm_crtc *crtc;
@@ -3099,6 +3101,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
}
}
+ drm_panic_lock(state->dev, flags);
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
WARN_ON(plane->state != old_plane_state);
@@ -3108,6 +3111,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
state->planes[i].state = old_plane_state;
plane->state = new_plane_state;
}
+ drm_panic_unlock(state->dev, flags);
for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
WARN_ON(obj->state != old_obj_state);
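
The drm_panic_lock()/drm_panic_unlock() pair added above keeps the
plane->state pointer swap atomic with respect to the panic handler, which may
run from interrupt context. A sketch of the writer-side pattern, assuming
drm_panic_lock() wraps raw_spin_lock_irqsave() on the panic_lock initialized
in drm_drv.c below:

	unsigned long flags;

	drm_panic_lock(state->dev, flags);
	/* The panic notifier only ever observes a fully updated pointer. */
	plane->state = new_plane_state;
	drm_panic_unlock(state->dev, flags);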
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 29d4940188d4..fc16fddee5c5 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -145,10 +145,10 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
&state->mode, blob->data);
if (ret) {
drm_dbg_atomic(crtc->dev,
- "[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
+ "[CRTC:%d:%s] invalid mode (%s, %pe): " DRM_MODE_FMT "\n",
crtc->base.id, crtc->name,
- ret, drm_get_mode_status_name(state->mode.status));
- drm_mode_debug_printmodeline(&state->mode);
+ drm_get_mode_status_name(state->mode.status),
+ ERR_PTR(ret), DRM_MODE_ARG(&state->mode));
return -EINVAL;
}
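
The %pe format specifier prints the symbolic error name when handed an
ERR_PTR()-encoded value, which is why the conversion folds the numeric ret
into the format string. For example:

	int ret = -EINVAL;

	/* Logs "invalid mode (-EINVAL)" rather than "invalid mode (ret=-22)". */
	drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] invalid mode (%pe)\n",
		       crtc->base.id, crtc->name, ERR_PTR(ret));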
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 521a71c61b16..28abe9aa99ca 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -657,6 +657,13 @@ static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
* bridge will be called before the previous one to reverse the @pre_enable
* calling direction.
*
+ * Example:
+ * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
+ *
+ * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the
+ * resulting @post_disable order would be:
+ * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
@@ -687,11 +694,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
*/
list_for_each_entry_from(next, &encoder->bridge_chain,
chain_node) {
- if (next->pre_enable_prev_first) {
+ if (!next->pre_enable_prev_first) {
next = list_prev_entry(next, chain_node);
limit = next;
break;
}
+
+ if (list_is_last(&next->chain_node,
+ &encoder->bridge_chain)) {
+ limit = next;
+ break;
+ }
}
/* Call these bridges in reverse order */
@@ -747,6 +760,13 @@ static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
* If a bridge sets @pre_enable_prev_first, then the pre_enable for the
* prev bridge will be called before pre_enable of this bridge.
*
+ * Example:
+ * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
+ *
+ * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the
+ * resulting @pre_enable order would be:
+ * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
@@ -774,7 +794,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
/* Found first bridge that does NOT
* request prev to be enabled first
*/
- limit = list_prev_entry(next, chain_node);
+ limit = next;
break;
}
}
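
A bridge driver opts in to the reordered bring-up by setting the flag before
registering the bridge. A hedged sketch, where my_bridge_funcs is a
hypothetical ops table:

	bridge->funcs = &my_bridge_funcs;
	/* Ask the core to pre_enable the previous bridge before this one. */
	bridge->pre_enable_prev_first = true;
	drm_bridge_add(bridge);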
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 5ebdd6f8f36e..284ebae71cc4 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -57,6 +57,16 @@ static void list_insert_sorted(struct drm_buddy *mm,
__list_add(&block->link, node->link.prev, &node->link);
}
+static void clear_reset(struct drm_buddy_block *block)
+{
+ block->header &= ~DRM_BUDDY_HEADER_CLEAR;
+}
+
+static void mark_cleared(struct drm_buddy_block *block)
+{
+ block->header |= DRM_BUDDY_HEADER_CLEAR;
+}
+
static void mark_allocated(struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
@@ -82,6 +92,133 @@ static void mark_split(struct drm_buddy_block *block)
list_del(&block->link);
}
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= s2 && e1 >= e2;
+}
+
+static struct drm_buddy_block *
+__get_buddy(struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *parent;
+
+ parent = block->parent;
+ if (!parent)
+ return NULL;
+
+ if (parent->left == block)
+ return parent->right;
+
+ return parent->left;
+}
+
+static unsigned int __drm_buddy_free(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ bool force_merge)
+{
+ struct drm_buddy_block *parent;
+ unsigned int order;
+
+ while ((parent = block->parent)) {
+ struct drm_buddy_block *buddy;
+
+ buddy = __get_buddy(block);
+
+ if (!drm_buddy_block_is_free(buddy))
+ break;
+
+ if (!force_merge) {
+ /*
+			 * Check the clear state of the block and its buddy
+			 * and exit the loop if they differ.
+ */
+ if (drm_buddy_block_is_clear(block) !=
+ drm_buddy_block_is_clear(buddy))
+ break;
+
+ if (drm_buddy_block_is_clear(block))
+ mark_cleared(parent);
+ }
+
+ list_del(&buddy->link);
+ if (force_merge && drm_buddy_block_is_clear(buddy))
+ mm->clear_avail -= drm_buddy_block_size(mm, buddy);
+
+ drm_block_free(mm, block);
+ drm_block_free(mm, buddy);
+
+ block = parent;
+ }
+
+ order = drm_buddy_block_order(block);
+ mark_free(mm, block);
+
+ return order;
+}
+
+static int __force_merge(struct drm_buddy *mm,
+ u64 start,
+ u64 end,
+ unsigned int min_order)
+{
+ unsigned int order;
+ int i;
+
+ if (!min_order)
+ return -ENOMEM;
+
+ if (min_order > mm->max_order)
+ return -EINVAL;
+
+ for (i = min_order - 1; i >= 0; i--) {
+ struct drm_buddy_block *block, *prev;
+
+ list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
+ struct drm_buddy_block *buddy;
+ u64 block_start, block_end;
+
+ if (!block->parent)
+ continue;
+
+ block_start = drm_buddy_block_offset(block);
+ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+
+ if (!contains(start, end, block_start, block_end))
+ continue;
+
+ buddy = __get_buddy(block);
+ if (!drm_buddy_block_is_free(buddy))
+ continue;
+
+ WARN_ON(drm_buddy_block_is_clear(block) ==
+ drm_buddy_block_is_clear(buddy));
+
+ /*
+			 * If the prev block is the same as the buddy, don't
+			 * access it in the next iteration, as we would free
+			 * the buddy block as part of the free function.
+ */
+ if (prev == buddy)
+ prev = list_prev_entry(prev, link);
+
+ list_del(&block->link);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+
+ order = __drm_buddy_free(mm, block, true);
+ if (order >= min_order)
+ return 0;
+ }
+ }
+
+ return -ENOMEM;
+}
+
/**
* drm_buddy_init - init memory manager
*
@@ -186,11 +323,21 @@ EXPORT_SYMBOL(drm_buddy_init);
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
+ u64 root_size, size;
+ unsigned int order;
int i;
+ size = mm->size;
+
for (i = 0; i < mm->n_roots; ++i) {
+ order = ilog2(size) - ilog2(mm->chunk_size);
+ __force_merge(mm, 0, size, order);
+
WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
drm_block_free(mm, mm->roots[i]);
+
+ root_size = mm->chunk_size << order;
+ size -= root_size;
}
WARN_ON(mm->avail != mm->size);
@@ -223,26 +370,17 @@ static int split_block(struct drm_buddy *mm,
mark_free(mm, block->left);
mark_free(mm, block->right);
+ if (drm_buddy_block_is_clear(block)) {
+ mark_cleared(block->left);
+ mark_cleared(block->right);
+ clear_reset(block);
+ }
+
mark_split(block);
return 0;
}
-static struct drm_buddy_block *
-__get_buddy(struct drm_buddy_block *block)
-{
- struct drm_buddy_block *parent;
-
- parent = block->parent;
- if (!parent)
- return NULL;
-
- if (parent->left == block)
- return parent->right;
-
- return parent->left;
-}
-
/**
* drm_get_buddy - get buddy address
*
@@ -260,30 +398,6 @@ drm_get_buddy(struct drm_buddy_block *block)
}
EXPORT_SYMBOL(drm_get_buddy);
-static void __drm_buddy_free(struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *parent;
-
- while ((parent = block->parent)) {
- struct drm_buddy_block *buddy;
-
- buddy = __get_buddy(block);
-
- if (!drm_buddy_block_is_free(buddy))
- break;
-
- list_del(&buddy->link);
-
- drm_block_free(mm, block);
- drm_block_free(mm, buddy);
-
- block = parent;
- }
-
- mark_free(mm, block);
-}
-
/**
* drm_buddy_free_block - free a block
*
@@ -295,42 +409,74 @@ void drm_buddy_free_block(struct drm_buddy *mm,
{
BUG_ON(!drm_buddy_block_is_allocated(block));
mm->avail += drm_buddy_block_size(mm, block);
- __drm_buddy_free(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail += drm_buddy_block_size(mm, block);
+
+ __drm_buddy_free(mm, block, false);
}
EXPORT_SYMBOL(drm_buddy_free_block);
-/**
- * drm_buddy_free_list - free blocks
- *
- * @mm: DRM buddy manager
- * @objects: input list head to free blocks
- */
-void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
+static void __drm_buddy_free_list(struct drm_buddy *mm,
+ struct list_head *objects,
+ bool mark_clear,
+ bool mark_dirty)
{
struct drm_buddy_block *block, *on;
+ WARN_ON(mark_dirty && mark_clear);
+
list_for_each_entry_safe(block, on, objects, link) {
+ if (mark_clear)
+ mark_cleared(block);
+ else if (mark_dirty)
+ clear_reset(block);
drm_buddy_free_block(mm, block);
cond_resched();
}
INIT_LIST_HEAD(objects);
}
-EXPORT_SYMBOL(drm_buddy_free_list);
-static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+static void drm_buddy_free_list_internal(struct drm_buddy *mm,
+ struct list_head *objects)
{
- return s1 <= e2 && e1 >= s2;
+ /*
+ * Don't touch the clear/dirty bit, since allocation is still internal
+ * at this point. For example we might have just failed part of the
+ * allocation.
+ */
+ __drm_buddy_free_list(mm, objects, false, false);
}
-static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+/**
+ * drm_buddy_free_list - free blocks
+ *
+ * @mm: DRM buddy manager
+ * @objects: input list head to free blocks
+ * @flags: optional flags like DRM_BUDDY_CLEARED
+ */
+void drm_buddy_free_list(struct drm_buddy *mm,
+ struct list_head *objects,
+ unsigned int flags)
{
- return s1 <= s2 && e1 >= e2;
+ bool mark_clear = flags & DRM_BUDDY_CLEARED;
+
+ __drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
+}
+EXPORT_SYMBOL(drm_buddy_free_list);
+
+static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags)
+{
+ bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION;
+
+ return needs_clear != drm_buddy_block_is_clear(block);
}
static struct drm_buddy_block *
-alloc_range_bias(struct drm_buddy *mm,
- u64 start, u64 end,
- unsigned int order)
+__alloc_range_bias(struct drm_buddy *mm,
+ u64 start, u64 end,
+ unsigned int order,
+ unsigned long flags,
+ bool fallback)
{
u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
@@ -379,6 +525,9 @@ alloc_range_bias(struct drm_buddy *mm,
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
+ if (!fallback && block_incompatible(block, flags))
+ continue;
+
/*
* Find the free block within the range.
*/
@@ -410,30 +559,57 @@ err_undo:
if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
- __drm_buddy_free(mm, block);
+ __drm_buddy_free(mm, block, false);
return ERR_PTR(err);
}
static struct drm_buddy_block *
-get_maxblock(struct drm_buddy *mm, unsigned int order)
+__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
+ u64 start, u64 end,
+ unsigned int order,
+ unsigned long flags)
+{
+ struct drm_buddy_block *block;
+ bool fallback = false;
+
+ block = __alloc_range_bias(mm, start, end, order,
+ flags, fallback);
+ if (IS_ERR(block) && mm->clear_avail)
+ return __alloc_range_bias(mm, start, end, order,
+ flags, !fallback);
+
+ return block;
+}
+
+static struct drm_buddy_block *
+get_maxblock(struct drm_buddy *mm, unsigned int order,
+ unsigned long flags)
{
- struct drm_buddy_block *max_block = NULL, *node;
+ struct drm_buddy_block *max_block = NULL, *block = NULL;
unsigned int i;
for (i = order; i <= mm->max_order; ++i) {
- if (!list_empty(&mm->free_list[i])) {
- node = list_last_entry(&mm->free_list[i],
- struct drm_buddy_block,
- link);
- if (!max_block) {
- max_block = node;
+ struct drm_buddy_block *tmp_block;
+
+ list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
+ if (block_incompatible(tmp_block, flags))
continue;
- }
- if (drm_buddy_block_offset(node) >
- drm_buddy_block_offset(max_block)) {
- max_block = node;
- }
+ block = tmp_block;
+ break;
+ }
+
+ if (!block)
+ continue;
+
+ if (!max_block) {
+ max_block = block;
+ continue;
+ }
+
+ if (drm_buddy_block_offset(block) >
+ drm_buddy_block_offset(max_block)) {
+ max_block = block;
}
}
@@ -450,12 +626,30 @@ alloc_from_freelist(struct drm_buddy *mm,
int err;
if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
- block = get_maxblock(mm, order);
+ block = get_maxblock(mm, order, flags);
if (block)
/* Store the obtained block order */
tmp = drm_buddy_block_order(block);
} else {
for (tmp = order; tmp <= mm->max_order; ++tmp) {
+ struct drm_buddy_block *tmp_block;
+
+ list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
+ if (block_incompatible(tmp_block, flags))
+ continue;
+
+ block = tmp_block;
+ break;
+ }
+
+ if (block)
+ break;
+ }
+ }
+
+ if (!block) {
+ /* Fallback method */
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
if (!list_empty(&mm->free_list[tmp])) {
block = list_last_entry(&mm->free_list[tmp],
struct drm_buddy_block,
@@ -464,10 +658,10 @@ alloc_from_freelist(struct drm_buddy *mm,
break;
}
}
- }
- if (!block)
- return ERR_PTR(-ENOSPC);
+ if (!block)
+ return ERR_PTR(-ENOSPC);
+ }
BUG_ON(!drm_buddy_block_is_free(block));
@@ -483,7 +677,7 @@ alloc_from_freelist(struct drm_buddy *mm,
err_undo:
if (tmp != order)
- __drm_buddy_free(mm, block);
+ __drm_buddy_free(mm, block, false);
return ERR_PTR(err);
}
@@ -526,16 +720,18 @@ static int __alloc_range(struct drm_buddy *mm,
}
if (contains(start, end, block_start, block_end)) {
- if (!drm_buddy_block_is_free(block)) {
+ if (drm_buddy_block_is_free(block)) {
+ mark_allocated(block);
+ total_allocated += drm_buddy_block_size(mm, block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+ list_add_tail(&block->link, &allocated);
+ continue;
+ } else if (!mm->clear_avail) {
err = -ENOSPC;
goto err_free;
}
-
- mark_allocated(block);
- total_allocated += drm_buddy_block_size(mm, block);
- mm->avail -= drm_buddy_block_size(mm, block);
- list_add_tail(&block->link, &allocated);
- continue;
}
if (!drm_buddy_block_is_split(block)) {
@@ -567,14 +763,14 @@ err_undo:
if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
- __drm_buddy_free(mm, block);
+ __drm_buddy_free(mm, block, false);
err_free:
if (err == -ENOSPC && total_allocated_on_err) {
list_splice_tail(&allocated, blocks);
*total_allocated_on_err = total_allocated;
} else {
- drm_buddy_free_list(mm, &allocated);
+ drm_buddy_free_list_internal(mm, &allocated);
}
return err;
@@ -640,11 +836,11 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
list_splice(&blocks_lhs, blocks);
return 0;
} else if (err != -ENOSPC) {
- drm_buddy_free_list(mm, blocks);
+ drm_buddy_free_list_internal(mm, blocks);
return err;
}
/* Free blocks for the next iteration */
- drm_buddy_free_list(mm, blocks);
+ drm_buddy_free_list_internal(mm, blocks);
}
return -ENOSPC;
@@ -700,6 +896,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
list_del(&block->link);
mark_free(mm, block);
mm->avail += drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail += drm_buddy_block_size(mm, block);
/* Prevent recursively freeing this node */
parent = block->parent;
@@ -711,6 +909,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
if (err) {
mark_allocated(block);
mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
list_add(&block->link, blocks);
}
@@ -719,13 +919,28 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
}
EXPORT_SYMBOL(drm_buddy_block_trim);
+static struct drm_buddy_block *
+__drm_buddy_alloc_blocks(struct drm_buddy *mm,
+ u64 start, u64 end,
+ unsigned int order,
+ unsigned long flags)
+{
+ if (flags & DRM_BUDDY_RANGE_ALLOCATION)
+ /* Allocate traversing within the range */
+ return __drm_buddy_alloc_range_bias(mm, start, end,
+ order, flags);
+ else
+ /* Allocate from freelist */
+ return alloc_from_freelist(mm, order, flags);
+}
+
/**
* drm_buddy_alloc_blocks - allocate power-of-two blocks
*
* @mm: DRM buddy manager to allocate from
* @start: start of the allowed range for this block
* @end: end of the allowed range for this block
- * @size: size of the allocation
+ * @size: size of the allocation in bytes
* @min_block_size: alignment of the allocation
* @blocks: output list head to add allocated blocks
* @flags: DRM_BUDDY_*_ALLOCATION flags
@@ -800,23 +1015,33 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
BUG_ON(order < min_order);
do {
- if (flags & DRM_BUDDY_RANGE_ALLOCATION)
- /* Allocate traversing within the range */
- block = alloc_range_bias(mm, start, end, order);
- else
- /* Allocate from freelist */
- block = alloc_from_freelist(mm, order, flags);
-
+ block = __drm_buddy_alloc_blocks(mm, start,
+ end,
+ order,
+ flags);
if (!IS_ERR(block))
break;
if (order-- == min_order) {
+			/* Try allocation through the force-merge method */
+ if (mm->clear_avail &&
+ !__force_merge(mm, start, end, min_order)) {
+ block = __drm_buddy_alloc_blocks(mm, start,
+ end,
+ min_order,
+ flags);
+ if (!IS_ERR(block)) {
+ order = min_order;
+ break;
+ }
+ }
+
+ /*
+			 * Try contiguous block allocation through the
+			 * try-harder method.
+ */
if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
!(flags & DRM_BUDDY_RANGE_ALLOCATION))
- /*
- * Try contiguous block allocation through
- * try harder method
- */
return __alloc_contig_try_harder(mm,
original_size,
original_min_size,
@@ -828,6 +1053,8 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
mark_allocated(block);
mm->avail -= drm_buddy_block_size(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
kmemleak_update_trace(block);
list_add_tail(&block->link, &allocated);
@@ -866,7 +1093,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
return 0;
err_free:
- drm_buddy_free_list(mm, &allocated);
+ drm_buddy_free_list_internal(mm, &allocated);
return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
@@ -899,8 +1126,8 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
int order;
- drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
- mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);
+ drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
+ mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
for (order = mm->max_order; order >= 0; order--) {
struct drm_buddy_block *block;
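
Taken together, the drm_buddy changes let a driver request already-cleared
blocks and return freed blocks with their clear state preserved. A minimal
usage sketch under those assumptions:

	LIST_HEAD(blocks);
	int err;

	/* Prefer blocks that are already cleared; fall back to dirty ones. */
	err = drm_buddy_alloc_blocks(mm, 0, mm->size, SZ_4M, mm->chunk_size,
				     &blocks, DRM_BUDDY_CLEAR_ALLOCATION);
	if (err)
		return err;

	/* ... the driver zeroes the memory before releasing it ... */

	/* Flag the blocks as cleared so clear_avail stays accurate. */
	drm_buddy_free_list(mm, &blocks, DRM_BUDDY_CLEARED);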
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 9403b3f576f7..2803ac111bbd 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -172,6 +172,18 @@ void drm_client_release(struct drm_client_dev *client)
}
EXPORT_SYMBOL(drm_client_release);
+/**
+ * drm_client_dev_unregister - Unregister clients
+ * @dev: DRM device
+ *
+ * This function releases all clients by calling each client's
+ * &drm_client_funcs.unregister callback. The callback function
+ * is responsibe for releaseing all resources including the client
+ * itself.
+ *
+ * The helper drm_dev_unregister() calls this function. Drivers
+ * that use it don't need to call this function themselves.
+ */
void drm_client_dev_unregister(struct drm_device *dev)
{
struct drm_client_dev *client, *tmp;
@@ -191,6 +203,7 @@ void drm_client_dev_unregister(struct drm_device *dev)
}
mutex_unlock(&dev->clientlist_mutex);
}
+EXPORT_SYMBOL(drm_client_dev_unregister);
/**
* drm_client_dev_hotplug - Send hotplug event to clients
@@ -305,6 +318,66 @@ err_delete:
}
/**
+ * drm_client_buffer_vmap_local - Map DRM client buffer into address space
+ * @buffer: DRM client buffer
+ * @map_copy: Returns the mapped memory's address
+ *
+ * This function maps a client buffer into kernel address space. If the
+ * buffer is already mapped, it returns the existing mapping's address.
+ *
+ * Client buffer mappings are not ref'counted. Each call to
+ * drm_client_buffer_vmap_local() should be closely followed by a call to
+ * drm_client_buffer_vunmap_local(). See drm_client_buffer_vmap() for
+ * long-term mappings.
+ *
+ * The returned address is a copy of the internal value. In contrast to
+ * other vmap interfaces, you don't need it for the client's vunmap
+ * function. So you can modify it at will during blit and draw operations.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
+ struct iosys_map *map_copy)
+{
+ struct drm_gem_object *gem = buffer->gem;
+ struct iosys_map *map = &buffer->map;
+ int ret;
+
+ drm_gem_lock(gem);
+
+ ret = drm_gem_vmap(gem, map);
+ if (ret)
+ goto err_drm_gem_vmap_unlocked;
+ *map_copy = *map;
+
+ return 0;
+
+err_drm_gem_vmap_unlocked:
+ drm_gem_unlock(gem);
+	return ret;
+}
+EXPORT_SYMBOL(drm_client_buffer_vmap_local);
+
+/**
+ * drm_client_buffer_vunmap_local - Unmap DRM client buffer
+ * @buffer: DRM client buffer
+ *
+ * This function removes a client buffer's memory mapping established
+ * with drm_client_buffer_vmap_local(). Calling this function is only
+ * required by clients that manage their buffer mappings by themselves.
+ */
+void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
+{
+ struct drm_gem_object *gem = buffer->gem;
+ struct iosys_map *map = &buffer->map;
+
+ drm_gem_vunmap(gem, map);
+ drm_gem_unlock(gem);
+}
+EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
+
+/**
* drm_client_buffer_vmap - Map DRM client buffer into address space
* @buffer: DRM client buffer
* @map_copy: Returns the mapped memory's address
@@ -328,24 +401,30 @@ int
drm_client_buffer_vmap(struct drm_client_buffer *buffer,
struct iosys_map *map_copy)
{
+ struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
int ret;
- /*
- * FIXME: The dependency on GEM here isn't required, we could
- * convert the driver handle to a dma-buf instead and use the
- * backend-agnostic dma-buf vmap support instead. This would
- * require that the handle2fd prime ioctl is reworked to pull the
- * fd_install step out of the driver backend hooks, to make that
- * final step optional for internal users.
- */
- ret = drm_gem_vmap_unlocked(buffer->gem, map);
+ drm_gem_lock(gem);
+
+ ret = drm_gem_pin_locked(gem);
if (ret)
- return ret;
+ goto err_drm_gem_pin_locked;
+ ret = drm_gem_vmap(gem, map);
+ if (ret)
+ goto err_drm_gem_vmap;
+
+ drm_gem_unlock(gem);
*map_copy = *map;
return 0;
+
+err_drm_gem_vmap:
+ drm_gem_unpin_locked(buffer->gem);
+err_drm_gem_pin_locked:
+ drm_gem_unlock(gem);
+ return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
@@ -359,9 +438,13 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
+ struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
- drm_gem_vunmap_unlocked(buffer->gem, map);
+ drm_gem_lock(gem);
+ drm_gem_vunmap(gem, map);
+ drm_gem_unpin_locked(gem);
+ drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);
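
The _local variants bracket short-term access without pinning the buffer. A
sketch of the intended pairing, where len is an assumed byte count:

	struct iosys_map map;
	int ret;

	ret = drm_client_buffer_vmap_local(buffer, &map);
	if (ret)
		return ret;

	/* Short-term access only, e.g. clearing part of the framebuffer. */
	iosys_map_memset(&map, 0, 0, len);

	drm_client_buffer_vunmap_local(buffer);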
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 871e4e2129d6..31af5cf37a09 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -242,8 +242,10 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
for (i = 0; i < connector_count; i++) {
connector = connectors[i];
enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- connector->display_info.non_desktop ? "non desktop" : str_yes_no(enabled[i]));
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] enabled? %s\n",
+ connector->base.id, connector->name,
+ connector->display_info.non_desktop ?
+ "non desktop" : str_yes_no(enabled[i]));
any_enabled |= enabled[i];
}
@@ -303,7 +305,7 @@ static bool drm_client_target_cloned(struct drm_device *dev,
}
if (can_clone) {
- DRM_DEBUG_KMS("can clone using command line\n");
+ drm_dbg_kms(dev, "can clone using command line\n");
return true;
}
@@ -332,15 +334,16 @@ static bool drm_client_target_cloned(struct drm_device *dev,
kfree(dmt_mode);
if (can_clone) {
- DRM_DEBUG_KMS("can clone using 1024x768\n");
+ drm_dbg_kms(dev, "can clone using 1024x768\n");
return true;
}
fail:
- DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ drm_info(dev, "kms: can't enable cloning when we probably wanted to.\n");
return false;
}
-static int drm_client_get_tile_offsets(struct drm_connector **connectors,
+static int drm_client_get_tile_offsets(struct drm_device *dev,
+ struct drm_connector **connectors,
unsigned int connector_count,
struct drm_display_mode **modes,
struct drm_client_offset *offsets,
@@ -357,8 +360,9 @@ static int drm_client_get_tile_offsets(struct drm_connector **connectors,
continue;
if (!modes[i] && (h_idx || v_idx)) {
- DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
- connector->base.id);
+ drm_dbg_kms(dev,
+ "[CONNECTOR:%d:%s] no modes for connector tiled %d\n",
+ connector->base.id, connector->name, i);
continue;
}
if (connector->tile_h_loc < h_idx)
@@ -369,11 +373,12 @@ static int drm_client_get_tile_offsets(struct drm_connector **connectors,
}
offsets[idx].x = hoffset;
offsets[idx].y = voffset;
- DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
+ drm_dbg_kms(dev, "returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
return 0;
}
-static bool drm_client_target_preferred(struct drm_connector **connectors,
+static bool drm_client_target_preferred(struct drm_device *dev,
+ struct drm_connector **connectors,
unsigned int connector_count,
struct drm_display_mode **modes,
struct drm_client_offset *offsets,
@@ -423,17 +428,19 @@ retry:
* find the tile offsets for this pass - need to find
* all tiles left and above
*/
- drm_client_get_tile_offsets(connectors, connector_count, modes, offsets, i,
+ drm_client_get_tile_offsets(dev, connectors, connector_count,
+ modes, offsets, i,
connector->tile_h_loc, connector->tile_v_loc);
}
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- connector->base.id);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
+ connector->base.id, connector->name);
 	/* go for command line mode first */
modes[i] = drm_connector_pick_cmdline_mode(connector);
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
- connector->base.id, connector->tile_group ? connector->tile_group->id : 0);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n",
+ connector->base.id, connector->name,
+ connector->tile_group ? connector->tile_group->id : 0);
modes[i] = drm_connector_has_preferred_mode(connector, width, height);
}
/* No preferred modes, pick one off the list */
@@ -455,16 +462,18 @@ retry:
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
- DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
- connector->base.id);
+ drm_dbg_kms(dev,
+ "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
+ connector->base.id, connector->name);
modes[i] = drm_connector_fallback_non_tiled_mode(connector);
} else {
modes[i] = drm_connector_get_tiled_mode(connector);
}
}
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n",
+ connector->base.id, connector->name,
+ modes[i] ? modes[i]->name : "none");
conn_configured |= BIT_ULL(i);
}
@@ -585,7 +594,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
if (!drm_drv_uses_atomic_modeset(dev))
return false;
- if (WARN_ON(count <= 0))
+ if (drm_WARN_ON(dev, count <= 0))
return false;
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
@@ -624,26 +633,26 @@ retry:
num_connectors_detected++;
if (!enabled[i]) {
- DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] not enabled, skipping\n",
+ connector->base.id, connector->name);
conn_configured |= BIT(i);
continue;
}
if (connector->force == DRM_FORCE_OFF) {
- DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] disabled by user, skipping\n",
+ connector->base.id, connector->name);
enabled[i] = false;
continue;
}
encoder = connector->state->best_encoder;
- if (!encoder || WARN_ON(!connector->state->crtc)) {
+ if (!encoder || drm_WARN_ON(dev, !connector->state->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;
- DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] has no encoder or crtc, skipping\n",
+ connector->base.id, connector->name);
enabled[i] = false;
conn_configured |= BIT(i);
continue;
@@ -660,28 +669,30 @@ retry:
*/
for (j = 0; j < count; j++) {
if (crtcs[j] == new_crtc) {
- DRM_DEBUG_KMS("fallback: cloned configuration\n");
+ drm_dbg_kms(dev, "fallback: cloned configuration\n");
goto bail;
}
}
- DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
+ connector->base.id, connector->name);
/* go for command line mode first */
modes[i] = drm_connector_pick_cmdline_mode(connector);
/* try for preferred next */
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
- connector->name, connector->has_tile);
+ drm_dbg_kms(dev,
+ "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n",
+ connector->base.id, connector->name,
+ str_yes_no(connector->has_tile));
modes[i] = drm_connector_has_preferred_mode(connector, width, height);
}
/* No preferred mode marked by the EDID? Are there any modes? */
if (!modes[i] && !list_empty(&connector->modes)) {
- DRM_DEBUG_KMS("using first mode listed on connector %s\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n",
+ connector->base.id, connector->name);
modes[i] = list_first_entry(&connector->modes,
struct drm_display_mode,
head);
@@ -700,8 +711,8 @@ retry:
* This is crtc->mode and not crtc->state->mode for the
* fastboot check to work correctly.
*/
- DRM_DEBUG_KMS("looking for current mode on connector %s\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n",
+ connector->base.id, connector->name);
modes[i] = &connector->state->crtc->mode;
}
/*
@@ -710,18 +721,18 @@ retry:
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
- connector->base.id);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
+ connector->base.id, connector->name);
modes[i] = drm_connector_fallback_non_tiled_mode(connector);
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
- connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n",
+ connector->base.id, connector->name,
+ connector->state->crtc->base.id,
+ connector->state->crtc->name,
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
fallback = false;
conn_configured |= BIT(i);
@@ -737,15 +748,15 @@ retry:
*/
if (num_connectors_enabled != num_connectors_detected &&
num_connectors_enabled < dev->mode_config.num_crtc) {
- DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
- DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
- num_connectors_detected);
+ drm_dbg_kms(dev, "fallback: Not all outputs enabled\n");
+ drm_dbg_kms(dev, "Enabled: %i, detected: %i\n",
+ num_connectors_enabled, num_connectors_detected);
fallback = true;
}
if (fallback) {
bail:
- DRM_DEBUG_KMS("Not using firmware configuration\n");
+ drm_dbg_kms(dev, "Not using firmware configuration\n");
memcpy(enabled, save_enabled, count);
ret = false;
}
@@ -777,12 +788,13 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
+ /* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
bool *enabled;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(dev, "\n");
if (!width)
width = dev->mode_config.max_width;
@@ -813,7 +825,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
offsets = kcalloc(connector_count, sizeof(*offsets), GFP_KERNEL);
enabled = kcalloc(connector_count, sizeof(bool), GFP_KERNEL);
if (!crtcs || !modes || !enabled || !offsets) {
- DRM_ERROR("Memory allocation failed\n");
ret = -ENOMEM;
goto out;
}
@@ -824,7 +835,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
for (i = 0; i < connector_count; i++)
total_modes_count += connectors[i]->funcs->fill_modes(connectors[i], width, height);
if (!total_modes_count)
- DRM_DEBUG_KMS("No connectors reported connected with modes\n");
+ drm_dbg_kms(dev, "No connectors reported connected with modes\n");
drm_client_connectors_enabled(connectors, connector_count, enabled);
if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
@@ -835,17 +846,16 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
if (!drm_client_target_cloned(dev, connectors, connector_count, modes,
offsets, enabled, width, height) &&
- !drm_client_target_preferred(connectors, connector_count, modes,
+ !drm_client_target_preferred(dev, connectors, connector_count, modes,
offsets, enabled, width, height))
- DRM_ERROR("Unable to find initial modes\n");
+ drm_err(dev, "Unable to find initial modes\n");
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
- width, height);
+ drm_dbg_kms(dev, "picking CRTCs for %dx%d config\n",
+ width, height);
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@@ -858,11 +868,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
struct drm_mode_set *modeset = drm_client_find_modeset(client, crtc);
struct drm_connector *connector = connectors[i];
- DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
- mode->name, crtc->base.id, offset->x, offset->y);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] desired mode %s set (%d,%d)\n",
+ crtc->base.id, crtc->name,
+ mode->name, offset->x, offset->y);
- if (WARN_ON_ONCE(modeset->num_connectors == DRM_CLIENT_MAX_CLONED_CONNECTORS ||
- (dev->mode_config.num_crtc > 1 && modeset->num_connectors == 1))) {
+ if (drm_WARN_ON_ONCE(dev, modeset->num_connectors == DRM_CLIENT_MAX_CLONED_CONNECTORS ||
+ (dev->mode_config.num_crtc > 1 && modeset->num_connectors == 1))) {
ret = -EINVAL;
break;
}
@@ -875,6 +886,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
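
The conversions above replace the device-agnostic DRM_DEBUG_KMS()/WARN_ON()
macros with variants that tag output with the originating drm device, which
matters on multi-GPU systems. For instance:

	/* Tags the message and any backtrace with the device name. */
	drm_dbg_kms(dev, "[CONNECTOR:%d:%s] probing\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));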
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 82c665d3e74b..483969b84a30 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -716,10 +716,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
crtc = drm_crtc_find(dev, file_priv, crtc_req->crtc_id);
if (!crtc) {
- DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+ drm_dbg_kms(dev, "Unknown CRTC ID %d\n", crtc_req->crtc_id);
return -ENOENT;
}
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+ drm_dbg_kms(dev, "[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
plane = crtc->primary;
@@ -742,7 +742,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
old_fb = plane->fb;
if (!old_fb) {
- DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+ drm_dbg_kms(dev, "CRTC doesn't have current FB\n");
ret = -EINVAL;
goto out;
}
@@ -753,8 +753,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
} else {
fb = drm_framebuffer_lookup(dev, file_priv, crtc_req->fb_id);
if (!fb) {
- DRM_DEBUG_KMS("Unknown FB ID%d\n",
- crtc_req->fb_id);
+ drm_dbg_kms(dev, "Unknown FB ID%d\n",
+ crtc_req->fb_id);
ret = -ENOENT;
goto out;
}
@@ -767,7 +767,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
if (!file_priv->aspect_ratio_allowed &&
(crtc_req->mode.flags & DRM_MODE_FLAG_PIC_AR_MASK) != DRM_MODE_FLAG_PIC_AR_NONE) {
- DRM_DEBUG_KMS("Unexpected aspect-ratio flag bits\n");
+ drm_dbg_kms(dev, "Unexpected aspect-ratio flag bits\n");
ret = -EINVAL;
goto out;
}
@@ -775,9 +775,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
if (ret) {
- DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
- ret, drm_get_mode_status_name(mode->status));
- drm_mode_debug_printmodeline(mode);
+ drm_dbg_kms(dev, "Invalid mode (%s, %pe): " DRM_MODE_FMT "\n",
+ drm_get_mode_status_name(mode->status),
+ ERR_PTR(ret), DRM_MODE_ARG(mode));
goto out;
}
@@ -793,9 +793,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
fb->format->format,
fb->modifier);
if (ret) {
- DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n",
- &fb->format->format,
- fb->modifier);
+ drm_dbg_kms(dev, "Invalid pixel format %p4cc, modifier 0x%llx\n",
+ &fb->format->format, fb->modifier);
goto out;
}
}
@@ -808,14 +807,14 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
if (crtc_req->count_connectors == 0 && mode) {
- DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+ drm_dbg_kms(dev, "Count connectors is 0 but mode set\n");
ret = -EINVAL;
goto out;
}
if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
- DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
- crtc_req->count_connectors);
+ drm_dbg_kms(dev, "Count connectors is %d but no mode or fb set\n",
+ crtc_req->count_connectors);
ret = -EINVAL;
goto out;
}
@@ -847,14 +846,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector = drm_connector_lookup(dev, file_priv, out_id);
if (!connector) {
- DRM_DEBUG_KMS("Connector id %d unknown\n",
- out_id);
+ drm_dbg_kms(dev, "Connector id %d unknown\n",
+ out_id);
ret = -ENOENT;
goto out;
}
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
connector_set[i] = connector;
num_connectors++;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2dafc39a27cb..0955f1c385dd 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -110,15 +110,15 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
struct drm_connector_list_iter conn_iter;
struct drm_device *dev = encoder->dev;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress) {
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
+ drm_WARN_ON(dev, !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
}
@@ -150,14 +150,14 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress)
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
@@ -230,7 +230,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
drm_modeset_lock_all(dev);
__drm_helper_disable_unused_functions(dev);
@@ -294,7 +294,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_encoder *encoder;
bool ret = true;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
drm_warn_on_modeset_not_all_locked(dev);
@@ -338,7 +338,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
- DRM_DEBUG_KMS("Encoder fixup failed\n");
+ drm_dbg_kms(dev, "[ENCODER:%d:%s] mode fixup failed\n",
+ encoder->base.id, encoder->name);
goto done;
}
}
@@ -347,11 +348,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (crtc_funcs->mode_fixup) {
if (!(ret = crtc_funcs->mode_fixup(crtc, mode,
adjusted_mode))) {
- DRM_DEBUG_KMS("CRTC fixup failed\n");
+ drm_dbg_kms(dev, "[CRTC:%d:%s] mode fixup failed\n",
+ crtc->base.id, crtc->name);
goto done;
}
}
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+ drm_dbg_kms(dev, "[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
drm_mode_copy(&crtc->hwmode, adjusted_mode);
@@ -390,8 +392,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%s]\n",
- encoder->base.id, encoder->name, mode->name);
+ drm_dbg_kms(dev, "[ENCODER:%d:%s] set [MODE:%s]\n",
+ encoder->base.id, encoder->name, mode->name);
if (encoder_funcs->mode_set)
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
@@ -503,7 +505,7 @@ drm_connector_get_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- WARN_ON(hweight32(connector->possible_encoders) > 1);
+ drm_WARN_ON(connector->dev, hweight32(connector->possible_encoders) > 1);
drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
@@ -564,8 +566,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
int ret;
int i;
- DRM_DEBUG_KMS("\n");
-
BUG_ON(!set);
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
@@ -577,19 +577,22 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
crtc_funcs = set->crtc->helper_private;
dev = set->crtc->dev;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
+ drm_dbg_kms(dev, "\n");
+
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
if (!set->mode)
set->fb = NULL;
if (set->fb) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
- set->crtc->base.id, set->crtc->name,
- set->fb->base.id,
- (int)set->num_connectors, set->x, set->y);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->crtc->name,
+ set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
} else {
- DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n",
- set->crtc->base.id, set->crtc->name);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] [NOFB]\n",
+ set->crtc->base.id, set->crtc->name);
drm_crtc_helper_disable(set->crtc);
return 0;
}
@@ -639,7 +642,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
if (set->crtc->primary->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->primary->fb == NULL) {
- DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ drm_dbg_kms(dev, "[CRTC:%d:%s] no fb, full mode set\n",
+ set->crtc->base.id, set->crtc->name);
mode_changed = true;
} else if (set->fb->format != set->crtc->primary->fb->format) {
mode_changed = true;
@@ -651,9 +655,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
fb_changed = true;
if (!drm_mode_equal(set->mode, &set->crtc->mode)) {
- DRM_DEBUG_KMS("modes are different, full mode set\n");
- drm_mode_debug_printmodeline(&set->crtc->mode);
- drm_mode_debug_printmodeline(set->mode);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] modes are different, full mode set:\n",
+ set->crtc->base.id, set->crtc->name);
+ drm_dbg_kms(dev, DRM_MODE_FMT "\n", DRM_MODE_ARG(&set->crtc->mode));
+ drm_dbg_kms(dev, DRM_MODE_FMT "\n", DRM_MODE_ARG(set->mode));
mode_changed = true;
}
@@ -687,7 +692,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
fail = 1;
if (connector->dpms != DRM_MODE_DPMS_ON) {
- DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] DPMS not on, full mode switch\n",
+ connector->base.id, connector->name);
mode_changed = true;
}
@@ -696,7 +702,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
}
if (new_encoder != connector->encoder) {
- DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] encoder changed, full mode switch\n",
+ connector->base.id, connector->name);
mode_changed = true;
/* If the encoder is reused for another connector, then
* the appropriate crtc will be set later.
@@ -737,17 +744,18 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
goto fail;
}
if (new_crtc != connector->encoder->crtc) {
- DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] CRTC changed, full mode switch\n",
+ connector->base.id, connector->name);
mode_changed = true;
connector->encoder->crtc = new_crtc;
}
if (new_crtc) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
- connector->base.id, connector->name,
- new_crtc->base.id, new_crtc->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
+ connector->base.id, connector->name,
+ new_crtc->base.id, new_crtc->name);
} else {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.id, connector->name);
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -758,23 +766,23 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
if (mode_changed) {
if (drm_helper_crtc_in_use(set->crtc)) {
- DRM_DEBUG_KMS("attempting to set mode from"
- " userspace\n");
- drm_mode_debug_printmodeline(set->mode);
+ drm_dbg_kms(dev, "[CRTC:%d:%s] attempting to set mode from userspace: " DRM_MODE_FMT "\n",
+ set->crtc->base.id, set->crtc->name, DRM_MODE_ARG(set->mode));
set->crtc->primary->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
save_set.fb)) {
- DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n",
- set->crtc->base.id, set->crtc->name);
+ drm_err(dev, "[CRTC:%d:%s] failed to set mode\n",
+ set->crtc->base.id, set->crtc->name);
set->crtc->primary->fb = save_set.fb;
ret = -EINVAL;
goto fail;
}
- DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ drm_dbg_kms(dev, "[CRTC:%d:%s] Setting connector DPMS state to on\n",
+ set->crtc->base.id, set->crtc->name);
for (i = 0; i < set->num_connectors; i++) {
- DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- set->connectors[i]->name);
+ drm_dbg_kms(dev, "\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ set->connectors[i]->name);
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
@@ -823,7 +831,7 @@ fail:
if (mode_changed &&
!drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
save_set.y, save_set.fb))
- DRM_ERROR("failed to restore config after modeset failure\n");
+ drm_err(dev, "failed to restore config after modeset failure\n");
kfree(save_connector_encoders);
kfree(save_encoder_crtcs);
@@ -905,7 +913,7 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
- WARN_ON(drm_drv_uses_atomic_modeset(connector->dev));
+ drm_WARN_ON(connector->dev, drm_drv_uses_atomic_modeset(connector->dev));
if (mode == connector->dpms)
return 0;
@@ -980,7 +988,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
int encoder_dpms;
bool ret;
- WARN_ON(drm_drv_uses_atomic_modeset(dev));
+ drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev));
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev) {
@@ -993,7 +1001,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
/* Restoring the old config should never fail! */
if (ret == false)
- DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+ drm_err(dev, "failed to set mode on crtc %p\n", crtc);
/* Turn off outputs that were already powered off */
if (drm_helper_choose_crtc_dpms(crtc)) {
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 28e04e750130..8059f65c5d6c 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -26,10 +26,15 @@
* implementation details and are not exported to drivers.
*/
-#include <drm/drm_connector.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_modes.h>
+#ifndef __DRM_CRTC_HELPER_INTERNAL_H__
+#define __DRM_CRTC_HELPER_INTERNAL_H__
+
+enum drm_mode_status;
+struct drm_connector;
+struct drm_crtc;
+struct drm_display_mode;
+struct drm_encoder;
+struct drm_modeset_acquire_ctx;
/* drm_probe_helper.c */
enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
@@ -44,3 +49,5 @@ drm_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *
drm_connector_get_single_encoder(struct drm_connector *connector);
+
+#endif /* __DRM_CRTC_HELPER_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a514d5207e41..25aaae937ceb 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -32,6 +32,10 @@
* and are not exported to drivers.
*/
+#ifndef __DRM_CRTC_INTERNAL_H__
+#define __DRM_CRTC_INTERNAL_H__
+
+#include <linux/err.h>
#include <linux/types.h>
enum drm_color_encoding;
@@ -39,12 +43,14 @@ enum drm_color_range;
enum drm_connector_force;
enum drm_mode_status;
+struct cea_sad;
struct drm_atomic_state;
struct drm_bridge;
struct drm_connector;
struct drm_crtc;
struct drm_device;
struct drm_display_mode;
+struct drm_edid;
struct drm_file;
struct drm_framebuffer;
struct drm_mode_create_dumb;
@@ -54,6 +60,7 @@ struct drm_mode_object;
struct drm_mode_set;
struct drm_plane;
struct drm_plane_state;
+struct drm_printer;
struct drm_property;
struct edid;
struct fwnode_handle;
@@ -292,6 +299,10 @@ void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
int drm_edid_override_show(struct drm_connector *connector, struct seq_file *m);
int drm_edid_override_set(struct drm_connector *connector, const void *edid, size_t size);
int drm_edid_override_reset(struct drm_connector *connector);
+const u8 *drm_edid_find_extension(const struct drm_edid *drm_edid,
+ int ext_id, int *ext_index);
+void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad);
+void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad);
/* drm_edid_load.c */
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
@@ -303,3 +314,5 @@ drm_edid_load_firmware(struct drm_connector *connector)
return ERR_PTR(-ENOENT);
}
#endif
+
+#endif /* __DRM_CRTC_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index 9edc111be7ee..9d01d762801f 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -3,10 +3,12 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/drm_displayid.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
+#include "drm_crtc_internal.h"
+#include "drm_displayid_internal.h"
+
static const struct displayid_header *
displayid_get_header(const u8 *displayid, int length, int index)
{
@@ -53,9 +55,10 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
int *length, int *idx,
int *ext_index)
{
- const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
+ const u8 *displayid;
+ displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index);
if (!displayid)
return NULL;
diff --git a/include/drm/drm_displayid.h b/drivers/gpu/drm/drm_displayid_internal.h
index 566497eeb3b8..aee1b86a73c1 100644
--- a/include/drm/drm_displayid.h
+++ b/drivers/gpu/drm/drm_displayid_internal.h
@@ -19,8 +19,9 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef DRM_DISPLAYID_H
-#define DRM_DISPLAYID_H
+
+#ifndef __DRM_DISPLAYID_INTERNAL_H__
+#define __DRM_DISPLAYID_INTERNAL_H__
#include <linux/types.h>
#include <linux/bits.h>
@@ -30,7 +31,6 @@ struct drm_edid;
#define VESA_IEEE_OUI 0x3a0292
/* DisplayID Structure versions */
-#define DISPLAY_ID_STRUCTURE_VER_12 0x12
#define DISPLAY_ID_STRUCTURE_VER_20 0x20
/* DisplayID Structure v1r2 Data Blocks */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 243cacb3575c..535b624d4c9d 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -43,6 +43,7 @@
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_machine.h>
@@ -638,6 +639,7 @@ static int drm_dev_init(struct drm_device *dev,
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
+ raw_spin_lock_init(&dev->mode_config.panic_lock);
ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
if (ret)
@@ -943,6 +945,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_unload;
}
+ drm_panic_register(dev);
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor,
@@ -987,6 +990,8 @@ void drm_dev_unregister(struct drm_device *dev)
{
dev->registered = false;
+ drm_panic_unregister(dev);
+
drm_client_dev_unregister(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 923c4423151c..4f54c91b31b2 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -29,16 +29,17 @@
*/
#include <linux/bitfield.h>
+#include <linux/byteorder/generic.h>
#include <linux/cec.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/seq_buf.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drm_displayid.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
@@ -46,6 +47,7 @@
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
+#include "drm_displayid_internal.h"
#include "drm_internal.h"
static int oui(u8 first, u8 second, u8 third)
@@ -102,6 +104,11 @@ struct detailed_mode_closure {
int modes;
};
+struct drm_edid_match_closure {
+ const struct drm_edid_ident *ident;
+ bool matched;
+};
+
#define LEVEL_DMT 0
#define LEVEL_GTF 1
#define LEVEL_GTF2 2
@@ -109,13 +116,15 @@ struct detailed_mode_closure {
#define EDID_QUIRK(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _quirks) \
{ \
- .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
- product_id), \
+ .ident = { \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, \
+ vend_chr_2, product_id), \
+ }, \
.quirks = _quirks \
}
static const struct edid_quirk {
- u32 panel_id;
+ const struct drm_edid_ident ident;
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
@@ -1811,36 +1820,25 @@ static bool edid_block_is_zero(const void *edid)
return !memchr_inv(edid, 0, EDID_LENGTH);
}
-/**
- * drm_edid_are_equal - compare two edid blobs.
- * @edid1: pointer to first blob
- * @edid2: pointer to second blob
- * This helper can be used during probing to determine if
- * edid had changed.
- */
-bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2)
+static bool drm_edid_eq(const struct drm_edid *drm_edid,
+ const void *raw_edid, size_t raw_edid_size)
{
- int edid1_len, edid2_len;
- bool edid1_present = edid1 != NULL;
- bool edid2_present = edid2 != NULL;
+ bool edid1_present = drm_edid && drm_edid->edid && drm_edid->size;
+ bool edid2_present = raw_edid && raw_edid_size;
if (edid1_present != edid2_present)
return false;
- if (edid1) {
- edid1_len = edid_size(edid1);
- edid2_len = edid_size(edid2);
-
- if (edid1_len != edid2_len)
+ if (edid1_present) {
+ if (drm_edid->size != raw_edid_size)
return false;
- if (memcmp(edid1, edid2, edid1_len))
+ if (memcmp(drm_edid->edid, raw_edid, drm_edid->size))
return false;
}
return true;
}
-EXPORT_SYMBOL(drm_edid_are_equal);
enum edid_block_status {
EDID_BLOCK_OK = 0,
@@ -2749,8 +2747,84 @@ const struct drm_edid *drm_edid_read(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_edid_read);
-static u32 edid_extract_panel_id(const struct edid *edid)
+/**
+ * drm_edid_get_product_id - Get the vendor and product identification
+ * @drm_edid: EDID
+ * @id: Where to place the product id
+ */
+void drm_edid_get_product_id(const struct drm_edid *drm_edid,
+ struct drm_edid_product_id *id)
+{
+ if (drm_edid && drm_edid->edid && drm_edid->size >= EDID_LENGTH)
+ memcpy(id, &drm_edid->edid->product_id, sizeof(*id));
+ else
+ memset(id, 0, sizeof(*id));
+}
+EXPORT_SYMBOL(drm_edid_get_product_id);
+
+static void decode_date(struct seq_buf *s, const struct drm_edid_product_id *id)
+{
+ int week = id->week_of_manufacture;
+ int year = id->year_of_manufacture + 1990;
+
+ if (week == 0xff)
+ seq_buf_printf(s, "model year: %d", year);
+ else if (!week)
+ seq_buf_printf(s, "year of manufacture: %d", year);
+ else
+ seq_buf_printf(s, "week/year of manufacture: %d/%d", week, year);
+}
+
+/**
+ * drm_edid_print_product_id - Print decoded product id to printer
+ * @p: drm printer
+ * @id: EDID product id
+ * @raw: If true, also print the raw hex
+ *
+ * See VESA E-EDID 1.4 section 3.4.
+ */
+void drm_edid_print_product_id(struct drm_printer *p,
+ const struct drm_edid_product_id *id, bool raw)
+{
+ DECLARE_SEQ_BUF(date, 40);
+ char vend[4];
+
+ drm_edid_decode_mfg_id(be16_to_cpu(id->manufacturer_name), vend);
+
+ decode_date(&date, id);
+
+ drm_printf(p, "manufacturer name: %s, product code: %u, serial number: %u, %s\n",
+ vend, le16_to_cpu(id->product_code),
+ le32_to_cpu(id->serial_number), seq_buf_str(&date));
+
+ if (raw)
+ drm_printf(p, "raw product id: %*ph\n", (int)sizeof(*id), id);
+
+ WARN_ON(seq_buf_has_overflowed(&date));
+}
+EXPORT_SYMBOL(drm_edid_print_product_id);
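A hedged usage sketch (not part of this patch; the function name and the drm_info_printer() sink are illustrative):

	static void example_dump_product_id(struct drm_device *dev,
					    const struct drm_edid *drm_edid)
	{
		struct drm_printer p = drm_info_printer(dev->dev);
		struct drm_edid_product_id id;

		/* Copies the id out of the base block (zeroed if the EDID is too short). */
		drm_edid_get_product_id(drm_edid, &id);
		drm_edid_print_product_id(&p, &id, true);
	}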
+
+/**
+ * drm_edid_get_panel_id - Get a panel's ID from EDID
+ * @drm_edid: EDID that contains panel ID.
+ *
+ * This function uses the first block of the EDID of a panel and (assuming
+ * that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
+ * (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
+ * supposed to be different for each different model of panel.
+ *
+ * Return: A 32-bit ID that should be different for each make/model of panel.
+ * See the functions drm_edid_encode_panel_id() and
+ * drm_edid_decode_panel_id() for some details on the structure of this
+ * ID. Returns 0 if the EDID is smaller than a base block.
+ */
+u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid)
{
+ const struct edid *edid = drm_edid->edid;
+
+ if (drm_edid->size < EDID_LENGTH)
+ return 0;
+
/*
* We represent the ID as a 32-bit number so it can easily be compared
* with "==".
@@ -2768,60 +2842,54 @@ static u32 edid_extract_panel_id(const struct edid *edid)
(u32)edid->mfg_id[1] << 16 |
(u32)EDID_PRODUCT_ID(edid);
}
+EXPORT_SYMBOL(drm_edid_get_panel_id);
/**
- * drm_edid_get_panel_id - Get a panel's ID through DDC
+ * drm_edid_read_base_block - Get a panel's EDID base block
* @adapter: I2C adapter to use for DDC
*
- * This function reads the first block of the EDID of a panel and (assuming
- * that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
- * (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
- * supposed to be different for each different modem of panel.
+ * This function returns the drm_edid containing the first block of the EDID of
+ * a panel.
*
* This function is intended to be used during early probing on devices where
* more than one panel might be present. Because of its intended use it must
- * assume that the EDID of the panel is correct, at least as far as the ID
- * is concerned (in other words, we don't process any overrides here).
+ * assume that the EDID of the panel is correct, at least as far as the base
+ * block is concerned (in other words, we don't process any overrides here).
+ *
+ * Caller should call drm_edid_free() after use.
*
 * NOTE: it's expected that this function and drm_do_get_edid() will both
 * read the EDID, but there is no caching between them. Since we're only
* reading the first block, hopefully this extra overhead won't be too big.
*
- * Return: A 32-bit ID that should be different for each make/model of panel.
- * See the functions drm_edid_encode_panel_id() and
- * drm_edid_decode_panel_id() for some details on the structure of this
- * ID.
+ * WARNING: Only use this function when the connector is unknown, for example
+ * during the early probing of a panel. The EDID read by this function is
+ * temporary and should be replaced by the full EDID returned from one of the
+ * drm_edid_read() variants.
+ *
+ * Return: Pointer to allocated EDID base block, or NULL on any failure.
*/
-
-u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
+const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter)
{
enum edid_block_status status;
void *base_block;
- u32 panel_id = 0;
-
- /*
- * There are no manufacturer IDs of 0, so if there is a problem reading
- * the EDID then we'll just return 0.
- */
base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!base_block)
- return 0;
+ return NULL;
status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter);
edid_block_status_print(status, base_block, 0);
- if (edid_block_status_valid(status, edid_block_tag(base_block)))
- panel_id = edid_extract_panel_id(base_block);
- else
+ if (!edid_block_status_valid(status, edid_block_tag(base_block))) {
edid_block_dump(KERN_NOTICE, base_block, 0);
+ kfree(base_block);
+ return NULL;
+ }
- kfree(base_block);
-
- return panel_id;
+ return _drm_edid_alloc(base_block, EDID_LENGTH);
}
-EXPORT_SYMBOL(drm_edid_get_panel_id);
+EXPORT_SYMBOL(drm_edid_read_base_block);
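Callers of the old one-shot helper can combine the two new functions; a minimal sketch, assuming an i2c adapter is already at hand:

	const struct drm_edid *base_block;
	u32 panel_id = 0;

	base_block = drm_edid_read_base_block(adapter);
	if (base_block) {
		panel_id = drm_edid_get_panel_id(base_block);
		drm_edid_free(base_block);
	}
	/* panel_id stays 0 when the base block could not be read. */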
/**
* drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
@@ -2903,16 +2971,17 @@ EXPORT_SYMBOL(drm_edid_duplicate);
* @drm_edid: EDID to process
*
* This tells subsequent routines what fixes they need to apply.
+ *
+ * Return: A u32 representing the quirks to apply.
*/
static u32 edid_get_quirks(const struct drm_edid *drm_edid)
{
- u32 panel_id = edid_extract_panel_id(drm_edid->edid);
const struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
- if (quirk->panel_id == panel_id)
+ if (drm_edid_match(drm_edid, &quirk->ident))
return quirk->quirks;
}
@@ -4120,7 +4189,7 @@ static int add_detailed_modes(struct drm_connector *connector,
*
* FIXME: Prefer not returning pointers to raw EDID data.
*/
-const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid,
+const u8 *drm_edid_find_extension(const struct drm_edid *drm_edid,
int ext_id, int *ext_index)
{
const u8 *edid_ext = NULL;
@@ -4150,11 +4219,21 @@ static bool drm_edid_has_cta_extension(const struct drm_edid *drm_edid)
{
const struct displayid_block *block;
struct displayid_iter iter;
- int ext_index = 0;
+ struct drm_edid_iter edid_iter;
+ const u8 *ext;
bool found = false;
/* Look for a top level CEA extension block */
- if (drm_find_edid_extension(drm_edid, CEA_EXT, &ext_index))
+ drm_edid_iter_begin(drm_edid, &edid_iter);
+ drm_edid_iter_for_each(ext, &edid_iter) {
+ if (ext[0] == CEA_EXT) {
+ found = true;
+ break;
+ }
+ }
+ drm_edid_iter_end(&edid_iter);
+
+ if (found)
return true;
/* CEA blocks can also be found embedded in a DisplayID block */
@@ -5443,6 +5522,66 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
}
static void
+match_identity(const struct detailed_timing *timing, void *data)
+{
+ struct drm_edid_match_closure *closure = data;
+ unsigned int i;
+ const char *name = closure->ident->name;
+ unsigned int name_len = strlen(name);
+ const char *desc = timing->data.other_data.data.str.str;
+ unsigned int desc_len = ARRAY_SIZE(timing->data.other_data.data.str.str);
+
+ if (name_len > desc_len ||
+ !(is_display_descriptor(timing, EDID_DETAIL_MONITOR_NAME) ||
+ is_display_descriptor(timing, EDID_DETAIL_MONITOR_STRING)))
+ return;
+
+ if (strncmp(name, desc, name_len))
+ return;
+
+ for (i = name_len; i < desc_len; i++) {
+ if (desc[i] == '\n')
+ break;
+ /* Allow white space before EDID string terminator. */
+ if (!isspace(desc[i]))
+ return;
+ }
+
+ closure->matched = true;
+}
+
+/**
+ * drm_edid_match - match drm_edid with given identity
+ * @drm_edid: EDID
+ * @ident: the EDID identity to match with
+ *
+ * Check if the EDID matches the given identity.
+ *
+ * Return: True if the given identity matches the EDID, false otherwise.
+ */
+bool drm_edid_match(const struct drm_edid *drm_edid,
+ const struct drm_edid_ident *ident)
+{
+ if (!drm_edid || drm_edid_get_panel_id(drm_edid) != ident->panel_id)
+ return false;
+
+ /* Match with name only if it's not NULL. */
+ if (ident->name) {
+ struct drm_edid_match_closure closure = {
+ .ident = ident,
+ .matched = false,
+ };
+
+ drm_for_each_detailed_block(drm_edid, match_identity, &closure);
+
+ return closure.matched;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(drm_edid_match);
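A sketch of matching against a known panel; the id and name below are illustrative, not taken from the patch:

	static const struct drm_edid_ident example_ident = {
		.panel_id = drm_edid_encode_panel_id('A', 'B', 'C', 0x1234),
		.name = "EXAMPLE PANEL",	/* checked against the EDID monitor name */
	};

	/* True only if the panel id and (when set) the name both match. */
	bool is_example_panel = drm_edid_match(drm_edid, &example_ident);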
+
+static void
monitor_name(const struct detailed_timing *timing, void *data)
{
const char **res = data;
@@ -6787,15 +6926,14 @@ static int _drm_edid_connector_property_update(struct drm_connector *connector,
int ret;
if (connector->edid_blob_ptr) {
- const struct edid *old_edid = connector->edid_blob_ptr->data;
-
- if (old_edid) {
- if (!drm_edid_are_equal(drm_edid ? drm_edid->edid : NULL, old_edid)) {
- connector->epoch_counter++;
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID changed, epoch counter %llu\n",
- connector->base.id, connector->name,
- connector->epoch_counter);
- }
+ const void *old_edid = connector->edid_blob_ptr->data;
+ size_t old_edid_size = connector->edid_blob_ptr->length;
+
+ if (old_edid && !drm_edid_eq(drm_edid, old_edid, old_edid_size)) {
+ connector->epoch_counter++;
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID changed, epoch counter %llu\n",
+ connector->base.id, connector->name,
+ connector->epoch_counter);
}
}
@@ -7324,7 +7462,7 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
static bool displayid_is_tiled_block(const struct displayid_iter *iter,
const struct displayid_block *block)
{
- return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
+ return (displayid_version(iter) < DISPLAY_ID_STRUCTURE_VER_20 &&
block->tag == DATA_BLOCK_TILED_DISPLAY) ||
(displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
diff --git a/drivers/gpu/drm/drm_eld.c b/drivers/gpu/drm/drm_eld.c
index 5177991aa272..c0428d07de53 100644
--- a/drivers/gpu/drm/drm_eld.c
+++ b/drivers/gpu/drm/drm_eld.c
@@ -3,10 +3,12 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/export.h>
+
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
-#include "drm_internal.h"
+#include "drm_crtc_internal.h"
/**
* drm_eld_sad_get - get SAD from ELD to struct cea_sad
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index 3b535ad1b07c..96e5ab960f12 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -15,6 +15,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -148,3 +149,44 @@ void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
}
}
EXPORT_SYMBOL_GPL(drm_fb_dma_sync_non_coherent);
+
+/**
+ * drm_fb_dma_get_scanout_buffer - Provide a scanout buffer in case of panic
+ * @plane: DRM primary plane
+ * @sb: scanout buffer for the panic handler
+ * Returns: 0 or negative error code
+ *
+ * Generic get_scanout_buffer() implementation, for drivers that use the
+ * drm_fb_dma_helper. It won't call vmap in the panic context, so the driver
+ * should make sure the primary plane is vmapped, otherwise the panic screen
+ * won't get displayed.
+ */
+int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct drm_gem_dma_object *dma_obj;
+ struct drm_framebuffer *fb;
+
+ fb = plane->state->fb;
+ /* Only support linear modifier */
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ return -ENODEV;
+
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+
+ /* Buffer should be accessible from the CPU */
+ if (dma_obj->base.import_attach)
+ return -ENODEV;
+
+ /* Buffer should be already mapped to CPU */
+ if (!dma_obj->vaddr)
+ return -ENODEV;
+
+ iosys_map_set_vaddr(&sb->map[0], dma_obj->vaddr);
+ sb->format = fb->format;
+ sb->height = fb->height;
+ sb->width = fb->width;
+ sb->pitch[0] = fb->pitches[0];
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_dma_get_scanout_buffer);
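A sketch of the intended hookup for a DMA-helper-based driver; the other plane hooks are elided:

	static const struct drm_plane_helper_funcs example_primary_plane_funcs = {
		/* ... the driver's usual atomic plane hooks ... */
		.get_scanout_buffer = drm_fb_dma_get_scanout_buffer,
	};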
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index d647d89764cb..be357f926fae 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -197,14 +197,14 @@ static int drm_fbdev_generic_damage_blit(struct drm_fb_helper *fb_helper,
*/
mutex_lock(&fb_helper->lock);
- ret = drm_client_buffer_vmap(buffer, &map);
+ ret = drm_client_buffer_vmap_local(buffer, &map);
if (ret)
goto out;
dst = map;
drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst);
- drm_client_buffer_vunmap(buffer);
+ drm_client_buffer_vunmap_local(buffer);
out:
mutex_unlock(&fb_helper->lock);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 44a948b80ee1..d4bbc5d109c8 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1161,7 +1161,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
obj->funcs->print_info(p, indent, obj);
}
-int drm_gem_pin(struct drm_gem_object *obj)
+int drm_gem_pin_locked(struct drm_gem_object *obj)
{
if (obj->funcs->pin)
return obj->funcs->pin(obj);
@@ -1169,12 +1169,30 @@ int drm_gem_pin(struct drm_gem_object *obj)
return 0;
}
-void drm_gem_unpin(struct drm_gem_object *obj)
+void drm_gem_unpin_locked(struct drm_gem_object *obj)
{
if (obj->funcs->unpin)
obj->funcs->unpin(obj);
}
+int drm_gem_pin(struct drm_gem_object *obj)
+{
+ int ret;
+
+ dma_resv_lock(obj->resv, NULL);
+ ret = drm_gem_pin_locked(obj);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
+}
+
+void drm_gem_unpin(struct drm_gem_object *obj)
+{
+ dma_resv_lock(obj->resv, NULL);
+ drm_gem_unpin_locked(obj);
+ dma_resv_unlock(obj->resv);
+}
+
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1209,6 +1227,18 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
}
EXPORT_SYMBOL(drm_gem_vunmap);
+void drm_gem_lock(struct drm_gem_object *obj)
+{
+ dma_resv_lock(obj->resv, NULL);
+}
+EXPORT_SYMBOL(drm_gem_lock);
+
+void drm_gem_unlock(struct drm_gem_object *obj)
+{
+ dma_resv_unlock(obj->resv);
+}
+EXPORT_SYMBOL(drm_gem_unlock);
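A minimal sketch of the exported lock helpers, equivalent to what drm_gem_vmap_unlocked() does internally:

	struct iosys_map map;
	int ret;

	drm_gem_lock(obj);
	ret = drm_gem_vmap(obj, &map);	/* requires the resv lock to be held */
	drm_gem_unlock(obj);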
+
int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e440f458b663..93337543aac3 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -224,8 +224,8 @@ __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
- drm_format_conv_state_copy(&shadow_plane_state->fmtcnv_state,
- &new_shadow_plane_state->fmtcnv_state);
+ drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
+ &shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index e435f986cd13..177773bcdbfd 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,7 +10,6 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -228,7 +227,7 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
-static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
+int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
int ret;
@@ -238,13 +237,15 @@ static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
return ret;
}
+EXPORT_SYMBOL(drm_gem_shmem_pin_locked);
-static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
drm_gem_shmem_put_pages(shmem);
}
+EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
/**
* drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 1ac284a9e8ee..6027584406af 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -282,6 +282,8 @@ static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
struct ttm_operation_ctx ctx = { false, false };
int ret;
+ dma_resv_assert_held(gbo->bo.base.resv);
+
if (gbo->bo.pin_count)
goto out;
@@ -337,6 +339,8 @@ EXPORT_SYMBOL(drm_gem_vram_pin);
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
+ dma_resv_assert_held(gbo->bo.base.resv);
+
ttm_bo_unpin(&gbo->bo);
}
@@ -363,11 +367,28 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
-static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
- struct iosys_map *map)
+/**
+ * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
+ * space
+ * @gbo: The GEM VRAM object to map
+ * @map: Returns the kernel virtual address of the VRAM GEM object's backing
+ * store.
+ *
+ * The vmap function pins a GEM VRAM object to its current location, either
+ * system or video memory, and maps its buffer into kernel address space.
+ * As pinned object cannot be relocated, you should avoid pinning objects
+ * permanently. Call drm_gem_vram_vunmap() with the returned address to
+ * unmap and unpin the GEM VRAM object.
+ *
+ * Returns:
+ * 0 on success, or a negative error code otherwise.
+ */
+int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map)
{
int ret;
+ dma_resv_assert_held(gbo->bo.base.resv);
+
if (gbo->vmap_use_count > 0)
goto out;
@@ -388,12 +409,23 @@ out:
return 0;
}
+EXPORT_SYMBOL(drm_gem_vram_vmap);
-static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
- struct iosys_map *map)
+/**
+ * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
+ * @gbo: The GEM VRAM object to unmap
+ * @map: Kernel virtual address where the VRAM GEM object was mapped
+ *
+ * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
+ * the documentation for drm_gem_vram_vmap() for more information.
+ */
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
+ struct iosys_map *map)
{
struct drm_device *dev = gbo->bo.base.dev;
+ dma_resv_assert_held(gbo->bo.base.resv);
+
if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
return;
@@ -410,60 +442,6 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
* from memory. See drm_gem_vram_bo_driver_move_notify().
*/
}
-
-/**
- * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
- * space
- * @gbo: The GEM VRAM object to map
- * @map: Returns the kernel virtual address of the VRAM GEM object's backing
- * store.
- *
- * The vmap function pins a GEM VRAM object to its current location, either
- * system or video memory, and maps its buffer into kernel address space.
- * As pinned object cannot be relocated, you should avoid pinning objects
- * permanently. Call drm_gem_vram_vunmap() with the returned address to
- * unmap and unpin the GEM VRAM object.
- *
- * Returns:
- * 0 on success, or a negative error code otherwise.
- */
-int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map)
-{
- int ret;
-
- dma_resv_assert_held(gbo->bo.base.resv);
-
- ret = drm_gem_vram_pin_locked(gbo, 0);
- if (ret)
- return ret;
- ret = drm_gem_vram_kmap_locked(gbo, map);
- if (ret)
- goto err_drm_gem_vram_unpin_locked;
-
- return 0;
-
-err_drm_gem_vram_unpin_locked:
- drm_gem_vram_unpin_locked(gbo);
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_vram_vmap);
-
-/**
- * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
- * @gbo: The GEM VRAM object to unmap
- * @map: Kernel virtual address where the VRAM GEM object was mapped
- *
- * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
- * the documentation for drm_gem_vram_vmap() for more information.
- */
-void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
- struct iosys_map *map)
-{
- dma_resv_assert_held(gbo->bo.base.resv);
-
- drm_gem_vram_kunmap_locked(gbo, map);
- drm_gem_vram_unpin_locked(gbo);
-}
EXPORT_SYMBOL(drm_gem_vram_vunmap);
/**
@@ -768,7 +746,8 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- /* Fbdev console emulation is the use case of these PRIME
+ /*
+ * Fbdev console emulation is the use case of these PRIME
* helpers. This may involve updating a hardware buffer from
	 * a shadow FB. We pin the buffer to its current location
* (either video RAM or system memory) to prevent it from
@@ -776,7 +755,7 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
* the buffer to be pinned to VRAM, implement a callback that
* sets the flags accordingly.
*/
- return drm_gem_vram_pin(gbo, 0);
+ return drm_gem_vram_pin_locked(gbo, 0);
}
/**
@@ -787,7 +766,7 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_unpin_locked(gbo);
}
/**
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 8e4faf0a28e6..690505a1f7a5 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -21,6 +21,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#ifndef __DRM_INTERNAL_H__
+#define __DRM_INTERNAL_H__
+
#include <linux/kthread.h>
#include <linux/types.h>
@@ -32,7 +35,6 @@
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-struct cea_sad;
struct dentry;
struct dma_buf;
struct iosys_map;
@@ -170,6 +172,8 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
+int drm_gem_pin_locked(struct drm_gem_object *obj);
+void drm_gem_unpin_locked(struct drm_gem_object *obj);
int drm_gem_pin(struct drm_gem_object *obj);
void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
@@ -273,6 +277,4 @@ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
void drm_framebuffer_debugfs_init(struct drm_device *dev);
-/* drm_edid.c */
-void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad);
-void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad);
+#endif /* __DRM_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index ef6e416522f8..795001bb7ff1 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -645,29 +645,56 @@ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
/**
- * mipi_dsi_compression_mode() - enable/disable DSC on the peripheral
+ * mipi_dsi_compression_mode_ext() - enable/disable DSC on the peripheral
* @dsi: DSI peripheral device
* @enable: Whether to enable or disable the DSC
+ * @algo: Selected compression algorithm
+ * @pps_selector: Select PPS from the table of pre-stored or uploaded PPS entries
*
- * Enable or disable Display Stream Compression on the peripheral using the
- * default Picture Parameter Set and VESA DSC 1.1 algorithm.
+ * Enable or disable Display Stream Compression on the peripheral.
*
* Return: 0 on success or a negative error code on failure.
*/
-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+int mipi_dsi_compression_mode_ext(struct mipi_dsi_device *dsi, bool enable,
+ enum mipi_dsi_compression_algo algo,
+ unsigned int pps_selector)
{
- /* Note: Needs updating for non-default PPS or algorithm */
- u8 tx[2] = { enable << 0, 0 };
+ u8 tx[2] = { };
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_COMPRESSION_MODE,
.tx_len = sizeof(tx),
.tx_buf = tx,
};
- int ret = mipi_dsi_device_transfer(dsi, &msg);
+ int ret;
+
+ if (algo > 3 || pps_selector > 3)
+ return -EINVAL;
+
+ tx[0] = (enable << 0) |
+ (algo << 1) |
+ (pps_selector << 4);
+
+ ret = mipi_dsi_device_transfer(dsi, &msg);
return (ret < 0) ? ret : 0;
}
+EXPORT_SYMBOL(mipi_dsi_compression_mode_ext);
+
+/**
+ * mipi_dsi_compression_mode() - enable/disable DSC on the peripheral
+ * @dsi: DSI peripheral device
+ * @enable: Whether to enable or disable the DSC
+ *
+ * Enable or disable Display Stream Compression on the peripheral using the
+ * default Picture Parameter Set and VESA DSC 1.1 algorithm.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+{
+ return mipi_dsi_compression_mode_ext(dsi, enable, MIPI_DSI_COMPRESSION_DSC, 0);
+}
EXPORT_SYMBOL(mipi_dsi_compression_mode);
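For example, a panel needing a non-default PPS slot could call the extended variant; slot 1 is illustrative:

	int ret = mipi_dsi_compression_mode_ext(dsi, true,
						MIPI_DSI_COMPRESSION_DSC, 1);
	if (ret < 0)
		return ret;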
/**
@@ -679,8 +706,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
*
* Return: 0 on success or a negative error code on failure.
*/
-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
- const struct drm_dsc_picture_parameter_set *pps)
+int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 48fd2d67f352..568972258222 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -372,6 +372,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.modifiers_property = prop;
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
+ "SIZE_HINTS", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.size_hints_property = prop;
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c4f88c3a93b7..2d8b0371619d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -373,8 +373,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hact_duration_ns < params->hact_ns.min ||
hact_duration_ns > params->hact_ns.max)) {
- DRM_ERROR("Invalid horizontal active area duration: %uns (min: %u, max %u)\n",
- hact_duration_ns, params->hact_ns.min, params->hact_ns.max);
+ drm_err(dev, "Invalid horizontal active area duration: %uns (min: %u, max %u)\n",
+ hact_duration_ns, params->hact_ns.min, params->hact_ns.max);
return -EINVAL;
}
@@ -385,8 +385,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hblk_duration_ns < params->hblk_ns.min ||
hblk_duration_ns > params->hblk_ns.max)) {
- DRM_ERROR("Invalid horizontal blanking duration: %uns (min: %u, max %u)\n",
- hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max);
+ drm_err(dev, "Invalid horizontal blanking duration: %uns (min: %u, max %u)\n",
+ hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max);
return -EINVAL;
}
@@ -397,8 +397,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hslen_duration_ns < params->hslen_ns.min ||
hslen_duration_ns > params->hslen_ns.max)) {
- DRM_ERROR("Invalid horizontal sync duration: %uns (min: %u, max %u)\n",
- hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max);
+ drm_err(dev, "Invalid horizontal sync duration: %uns (min: %u, max %u)\n",
+ hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max);
return -EINVAL;
}
@@ -409,7 +409,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(porches_duration_ns > (params->hfp_ns.max + params->hbp_ns.max) ||
porches_duration_ns < (params->hfp_ns.min + params->hbp_ns.min))) {
- DRM_ERROR("Invalid horizontal porches duration: %uns\n", porches_duration_ns);
+ drm_err(dev, "Invalid horizontal porches duration: %uns\n",
+ porches_duration_ns);
return -EINVAL;
}
@@ -431,8 +432,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hfp_duration_ns < params->hfp_ns.min ||
hfp_duration_ns > params->hfp_ns.max)) {
- DRM_ERROR("Invalid horizontal front porch duration: %uns (min: %u, max %u)\n",
- hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max);
+ drm_err(dev, "Invalid horizontal front porch duration: %uns (min: %u, max %u)\n",
+ hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max);
return -EINVAL;
}
@@ -443,8 +444,8 @@ static int fill_analog_mode(struct drm_device *dev,
if (!bt601 &&
(hbp_duration_ns < params->hbp_ns.min ||
hbp_duration_ns > params->hbp_ns.max)) {
- DRM_ERROR("Invalid horizontal back porch duration: %uns (min: %u, max %u)\n",
- hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max);
+ drm_err(dev, "Invalid horizontal back porch duration: %uns (min: %u, max %u)\n",
+ hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max);
return -EINVAL;
}
@@ -495,8 +496,8 @@ static int fill_analog_mode(struct drm_device *dev,
vtotal = vactive + vfp + vslen + vbp;
if (params->num_lines != vtotal) {
- DRM_ERROR("Invalid vertical total: %upx (expected %upx)\n",
- vtotal, params->num_lines);
+ drm_err(dev, "Invalid vertical total: %upx (expected %upx)\n",
+ vtotal, params->num_lines);
return -EINVAL;
}
@@ -1200,9 +1201,8 @@ int of_get_drm_display_mode(struct device_node *np,
if (bus_flags)
drm_bus_flags_from_videomode(&vm, bus_flags);
- pr_debug("%pOF: got %dx%d display mode\n",
- np, vm.hactive, vm.vactive);
- drm_mode_debug_printmodeline(dmode);
+ pr_debug("%pOF: got %dx%d display mode: " DRM_MODE_FMT "\n",
+ np, vm.hactive, vm.vactive, DRM_MODE_ARG(dmode));
return 0;
}
@@ -1250,7 +1250,7 @@ int of_get_drm_panel_display_mode(struct device_node *np,
dmode->width_mm = width_mm;
dmode->height_mm = height_mm;
- drm_mode_debug_printmodeline(dmode);
+ pr_debug(DRM_MODE_FMT "\n", DRM_MODE_ARG(dmode));
return 0;
}
@@ -1812,10 +1812,8 @@ void drm_mode_prune_invalid(struct drm_device *dev,
DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
if (verbose) {
- drm_mode_debug_printmodeline(mode);
- DRM_DEBUG_KMS("Not using %s mode: %s\n",
- mode->name,
- drm_get_mode_status_name(mode->status));
+ drm_dbg_kms(dev, "Rejected mode: " DRM_MODE_FMT " (%s)\n",
+ DRM_MODE_ARG(mode), drm_get_mode_status_name(mode->status));
}
drm_mode_destroy(dev, mode);
}
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
new file mode 100644
index 000000000000..7ece67086cec
--- /dev/null
+++ b/drivers/gpu/drm/drm_panic.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/*
+ * Copyright (c) 2023 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ * inspired by the drm_log driver from David Herrmann <dh.herrmann@gmail.com>
+ * Tux Ascii art taken from cowsay written by Tony Monroe
+ */
+
+#include <linux/font.h>
+#include <linux/iosys-map.h>
+#include <linux/kdebug.h>
+#include <linux/kmsg_dump.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+
+MODULE_AUTHOR("Jocelyn Falempe");
+MODULE_DESCRIPTION("DRM panic handler");
+MODULE_LICENSE("GPL");
+
+/**
+ * DOC: overview
+ *
+ * To enable DRM panic for a driver, the primary plane must implement a
+ * &drm_plane_helper_funcs.get_scanout_buffer helper function. It is then
+ * automatically registered to the drm panic handler.
+ * When a panic occurs, the &drm_plane_helper_funcs.get_scanout_buffer will be
+ * called, and the driver can provide a framebuffer so the panic handler can
+ * draw the panic screen on it. Currently only linear buffers and a few color
+ * formats are supported.
+ * Optionally the driver can also provide a &drm_plane_helper_funcs.panic_flush
+ * callback, which is called afterwards to send additional commands to the
+ * hardware to make the scanout buffer visible.
+ */
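A hedged sketch of the optional flush hook; the device structure, register offset, and accessor below are hypothetical:

	static void example_panic_flush(struct drm_plane *plane)
	{
		struct example_device *edev = to_example_device(plane->dev);

		/* Panic context: poke the hardware directly; no locks, no sleeping. */
		writel(1, edev->mmio + EXAMPLE_FLUSH_REG);
	}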
+
+/*
+ * This module displays a user-friendly message on screen when a kernel panic
+ * occurs. It conflicts with fbcon, so you can only enable it when fbcon is
+ * disabled.
+ * It's intended for end users, so it carries minimal technical/debug information.
+ *
+ * Implementation details:
+ *
+ * It is a panic handler, so it can't take locks, allocate memory, run tasks/irqs,
+ * or attempt to sleep. It's best effort, and it may not be able to display
+ * the message in all situations (for example, if the panic occurs in the middle
+ * of a modeset).
+ * It will display only one static frame, so performance optimizations are low
+ * priority as the machine is already in an unusable state.
+ */
+
+struct drm_panic_line {
+ u32 len;
+ const char *txt;
+};
+
+#define PANIC_LINE(s) {.len = sizeof(s) - 1, .txt = s}
+
+static struct drm_panic_line panic_msg[] = {
+ PANIC_LINE("KERNEL PANIC !"),
+ PANIC_LINE(""),
+ PANIC_LINE("Please reboot your computer."),
+};
+
+static const struct drm_panic_line logo[] = {
+ PANIC_LINE(" .--. _"),
+ PANIC_LINE(" |o_o | | |"),
+ PANIC_LINE(" |:_/ | | |"),
+ PANIC_LINE(" // \\ \\ |_|"),
+ PANIC_LINE(" (| | ) _"),
+ PANIC_LINE(" /'\\_ _/`\\ (_)"),
+ PANIC_LINE(" \\___)=(___/"),
+};
+
+/*
+ * Color conversion
+ */
+
+static u16 convert_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00F80000) >> 8) |
+ ((pix & 0x0000FC00) >> 5) |
+ ((pix & 0x000000F8) >> 3);
+}
+
+static u16 convert_xrgb8888_to_rgba5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2) |
+ BIT(0); /* set alpha bit */
+}
+
+static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u16 convert_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u32 convert_xrgb8888_to_argb8888(u32 pix)
+{
+ return pix | GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ ((pix & 0xff000000) >> 24) << 24;
+}
+
+static u32 convert_xrgb8888_to_abgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return pix | ((pix >> 8) & 0x00300C03);
+}
+
+static u32 convert_xrgb8888_to_argb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+}
+
+/*
+ * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
+ * @color: input color, in xrgb8888 format
+ * @format: output format
+ *
+ * Returns:
+ * Color in the format specified, cast to u32.
+ * Or 0 if the format is not supported.
+ */
+static u32 convert_from_xrgb8888(u32 color, u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ return convert_xrgb8888_to_rgb565(color);
+ case DRM_FORMAT_RGBA5551:
+ return convert_xrgb8888_to_rgba5551(color);
+ case DRM_FORMAT_XRGB1555:
+ return convert_xrgb8888_to_xrgb1555(color);
+ case DRM_FORMAT_ARGB1555:
+ return convert_xrgb8888_to_argb1555(color);
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ return color;
+ case DRM_FORMAT_ARGB8888:
+ return convert_xrgb8888_to_argb8888(color);
+ case DRM_FORMAT_XBGR8888:
+ return convert_xrgb8888_to_xbgr8888(color);
+ case DRM_FORMAT_ABGR8888:
+ return convert_xrgb8888_to_abgr8888(color);
+ case DRM_FORMAT_XRGB2101010:
+ return convert_xrgb8888_to_xrgb2101010(color);
+ case DRM_FORMAT_ARGB2101010:
+ return convert_xrgb8888_to_argb2101010(color);
+ default:
+ WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
+ return 0;
+ }
+}
+
+/*
+ * Blit & Fill
+ */
+static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ u16 fg16, u16 bg16)
+{
+ unsigned int y, x;
+ u16 val16;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ val16 = (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) ? fg16 : bg16;
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, val16);
+ }
+ }
+}
+
+static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ u32 fg32, u32 bg32)
+{
+ unsigned int y, x;
+ u32 val32;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ u32 off = y * dpitch + x * 3;
+
+ val32 = (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) ? fg32 : bg32;
+
+ /* write blue-green-red to output in little endianness */
+ iosys_map_wr(dmap, off, u8, (val32 & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (val32 & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (val32 & 0x00FF0000) >> 16);
+ }
+ }
+}
+
+static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ u32 fg32, u32 bg32)
+{
+ unsigned int y, x;
+ u32 val32;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ val32 = (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) ? fg32 : bg32;
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, val32);
+ }
+ }
+}
+
+/*
+ * drm_panic_blit - convert a monochrome image to a linear framebuffer
+ * @dmap: destination iosys_map
+ * @dpitch: destination pitch in bytes
+ * @sbuf8: source buffer, in monochrome format, 8 pixels per byte.
+ * @spitch: source pitch in bytes
+ * @height: height of the image to copy, in pixels
+ * @width: width of the image to copy, in pixels
+ * @fg_color: foreground color, in destination format
+ * @bg_color: background color, in destination format
+ * @pixel_width: pixel width in bytes.
+ *
+ * This can be used to draw a font character, which is a monochrome image, to a
+ * framebuffer in other supported format.
+ */
+static void drm_panic_blit(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ u32 fg_color, u32 bg_color,
+ unsigned int pixel_width)
+{
+ switch (pixel_width) {
+ case 2:
+ drm_panic_blit16(dmap, dpitch, sbuf8, spitch,
+ height, width, fg_color, bg_color);
+ break;
+ case 3:
+ drm_panic_blit24(dmap, dpitch, sbuf8, spitch,
+ height, width, fg_color, bg_color);
+ break;
+ case 4:
+ drm_panic_blit32(dmap, dpitch, sbuf8, spitch,
+ height, width, fg_color, bg_color);
+ break;
+ default:
+ WARN_ONCE(1, "Can't blit with pixel width %d\n", pixel_width);
+ }
+}
+
+static void drm_panic_fill16(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u16 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
+}
+
+static void drm_panic_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ unsigned int off = y * dpitch + x * 3;
+
+ /* write blue-green-red to output in little endianness */
+ iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
+ }
+ }
+}
+
+static void drm_panic_fill32(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
+}
+
+/*
+ * drm_panic_fill - Fill a rectangle with a color
+ * @dmap: destination iosys_map, pointing to the top left corner of the rectangle
+ * @dpitch: destination pitch in bytes
+ * @height: height of the rectangle, in pixels
+ * @width: width of the rectangle, in pixels
+ * @color: color to fill the rectangle.
+ * @pixel_width: pixel width in bytes
+ *
+ * Fill a rectangle with a color, in a linear framebuffer.
+ */
+static void drm_panic_fill(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color, unsigned int pixel_width)
+{
+ switch (pixel_width) {
+ case 2:
+ drm_panic_fill16(dmap, dpitch, height, width, color);
+ break;
+ case 3:
+ drm_panic_fill24(dmap, dpitch, height, width, color);
+ break;
+ case 4:
+ drm_panic_fill32(dmap, dpitch, height, width, color);
+ break;
+ default:
+ WARN_ONCE(1, "Can't fill with pixel width %d\n", pixel_width);
+ }
+}
+
+static const u8 *get_char_bitmap(const struct font_desc *font, char c, size_t font_pitch)
+{
+ return font->data + (c * font->height) * font_pitch;
+}
+
+static unsigned int get_max_line_len(const struct drm_panic_line *lines, int len)
+{
+ int i;
+ unsigned int max = 0;
+
+ for (i = 0; i < len; i++)
+ max = max(lines[i].len, max);
+ return max;
+}
+
+/*
+ * Draw text in a rectangle on a framebuffer. The text is truncated if it overflows the rectangle.
+ */
+static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
+ const struct font_desc *font,
+ const struct drm_panic_line *msg,
+ unsigned int msg_lines,
+ bool centered,
+ struct drm_rect *clip,
+ u32 fg_color,
+ u32 bg_color)
+{
+ int i, j;
+ const u8 *src;
+ size_t font_pitch = DIV_ROUND_UP(font->width, 8);
+ struct iosys_map dst;
+ unsigned int px_width = sb->format->cpp[0];
+ int left = 0;
+
+ msg_lines = min(msg_lines, drm_rect_height(clip) / font->height);
+ for (i = 0; i < msg_lines; i++) {
+ size_t line_len = min(msg[i].len, drm_rect_width(clip) / font->width);
+
+ if (centered)
+ left = (drm_rect_width(clip) - (line_len * font->width)) / 2;
+
+ dst = sb->map[0];
+ iosys_map_incr(&dst, (clip->y1 + i * font->height) * sb->pitch[0] +
+ (clip->x1 + left) * px_width);
+ for (j = 0; j < line_len; j++) {
+ src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
+ drm_panic_blit(&dst, sb->pitch[0], src, font_pitch,
+ font->height, font->width,
+ fg_color, bg_color, px_width);
+ iosys_map_incr(&dst, font->width * px_width);
+ }
+ }
+}
+
+/*
+ * Draw the panic message at the center of the screen
+ */
+static void draw_panic_static(struct drm_scanout_buffer *sb)
+{
+ size_t msg_lines = ARRAY_SIZE(panic_msg);
+ size_t logo_lines = ARRAY_SIZE(logo);
+ u32 fg_color = CONFIG_DRM_PANIC_FOREGROUND_COLOR;
+ u32 bg_color = CONFIG_DRM_PANIC_BACKGROUND_COLOR;
+ const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ struct drm_rect r_logo, r_msg;
+
+ if (!font)
+ return;
+
+ fg_color = convert_from_xrgb8888(fg_color, sb->format->format);
+ bg_color = convert_from_xrgb8888(bg_color, sb->format->format);
+
+ r_logo = DRM_RECT_INIT(0, 0,
+ get_max_line_len(logo, logo_lines) * font->width,
+ logo_lines * font->height);
+ r_msg = DRM_RECT_INIT(0, 0,
+ min(get_max_line_len(panic_msg, msg_lines) * font->width, sb->width),
+ min(msg_lines * font->height, sb->height));
+
+ /* Center the panic message */
+ drm_rect_translate(&r_msg, (sb->width - r_msg.x2) / 2, (sb->height - r_msg.y2) / 2);
+
+ /* Fill with the background color, and draw text on top */
+ drm_panic_fill(&sb->map[0], sb->pitch[0], sb->height, sb->width,
+ bg_color, sb->format->cpp[0]);
+
+ if ((r_msg.x1 >= drm_rect_width(&r_logo) || r_msg.y1 >= drm_rect_height(&r_logo)) &&
+ drm_rect_width(&r_logo) < sb->width && drm_rect_height(&r_logo) < sb->height) {
+ draw_txt_rectangle(sb, font, logo, logo_lines, false, &r_logo, fg_color, bg_color);
+ }
+ draw_txt_rectangle(sb, font, panic_msg, msg_lines, true, &r_msg, fg_color, bg_color);
+}
+
+/*
+ * drm_panic_is_format_supported()
+ * @format: a fourcc color code
+ * Returns: true if supported, false otherwise.
+ *
+ * Check if drm_panic will be able to use this color format.
+ */
+static bool drm_panic_is_format_supported(const struct drm_format_info *format)
+{
+ if (format->num_planes != 1)
+ return false;
+ return convert_from_xrgb8888(0xffffff, format->format) != 0;
+}
+
+static void draw_panic_plane(struct drm_plane *plane)
+{
+ struct drm_scanout_buffer sb;
+ int ret;
+ unsigned long flags;
+
+ if (!drm_panic_trylock(plane->dev, flags))
+ return;
+
+ ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+
+ if (!ret && drm_panic_is_format_supported(sb.format)) {
+ draw_panic_static(&sb);
+ if (plane->helper_private->panic_flush)
+ plane->helper_private->panic_flush(plane);
+ }
+ drm_panic_unlock(plane->dev, flags);
+}
+
+static struct drm_plane *to_drm_plane(struct kmsg_dumper *kd)
+{
+ return container_of(kd, struct drm_plane, kmsg_panic);
+}
+
+static void drm_panic(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
+{
+ struct drm_plane *plane = to_drm_plane(dumper);
+
+ if (reason == KMSG_DUMP_PANIC)
+ draw_panic_plane(plane);
+}
+
+/*
+ * DebugFS support. This is currently unsafe.
+ * Create one file per plane, so it's possible to debug one plane at a time.
+ * TODO: It would be better to emulate an NMI context.
+ */
+#ifdef CONFIG_DRM_PANIC_DEBUG
+#include <linux/debugfs.h>
+
+static ssize_t debugfs_trigger_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ bool run;
+
+ if (kstrtobool_from_user(user_buf, count, &run) == 0 && run) {
+ struct drm_plane *plane = file->private_data;
+
+ draw_panic_plane(plane);
+ }
+ return count;
+}
+
+static const struct file_operations dbg_drm_panic_ops = {
+ .owner = THIS_MODULE,
+ .write = debugfs_trigger_write,
+ .open = simple_open,
+};
+
+static void debugfs_register_plane(struct drm_plane *plane, int index)
+{
+ char fname[32];
+
+ snprintf(fname, 32, "drm_panic_plane_%d", index);
+ debugfs_create_file(fname, 0200, plane->dev->debugfs_root,
+ plane, &dbg_drm_panic_ops);
+}
+#else
+static void debugfs_register_plane(struct drm_plane *plane, int index) {}
+#endif /* CONFIG_DRM_PANIC_DEBUG */
+
+/**
+ * drm_panic_register() - Initialize DRM panic for a device
+ * @dev: the drm device on which the panic screen will be displayed.
+ */
+void drm_panic_register(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+ int registered_plane = 0;
+
+ if (!dev->mode_config.num_total_plane)
+ return;
+
+ drm_for_each_plane(plane, dev) {
+ if (!plane->helper_private || !plane->helper_private->get_scanout_buffer)
+ continue;
+ plane->kmsg_panic.dump = drm_panic;
+ plane->kmsg_panic.max_reason = KMSG_DUMP_PANIC;
+ if (kmsg_dump_register(&plane->kmsg_panic))
+ drm_warn(dev, "Failed to register panic handler\n");
+ else {
+ debugfs_register_plane(plane, registered_plane);
+ registered_plane++;
+ }
+ }
+ if (registered_plane)
+ drm_info(dev, "Registered %d planes with drm panic\n", registered_plane);
+}
+EXPORT_SYMBOL(drm_panic_register);
+
+/**
+ * drm_panic_unregister() - Remove the drm panic handlers for a device
+ * @dev: the drm device previously registered.
+ */
+void drm_panic_unregister(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+
+ if (!dev->mode_config.num_total_plane)
+ return;
+
+ drm_for_each_plane(plane, dev) {
+ if (!plane->helper_private || !plane->helper_private->get_scanout_buffer)
+ continue;
+ kmsg_dump_unregister(&plane->kmsg_panic);
+ }
+}
+EXPORT_SYMBOL(drm_panic_unregister);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 672c655c7a8e..57662a1fd345 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -140,6 +140,25 @@
* DRM_FORMAT_MOD_LINEAR. Before linux kernel release v5.1 there have been
* various bugs in this area with inconsistencies between the capability
* flag and per-plane properties.
+ *
+ * SIZE_HINTS:
+ * Blob property which contains the set of recommended plane sizes
+ * which can be used for simple "cursor like" use cases (eg. no scaling).
+ * Using these hints frees userspace from extensive probing of
+ * supported plane sizes through atomic/setcursor ioctls.
+ *
+ * The blob contains an array of struct drm_plane_size_hint, in
+ * order of preference. For optimal usage userspace should pick
+ * the first size that satisfies its own requirements.
+ *
+ * Drivers should only attach this property to planes that
+ * support a very limited set of sizes.
+ *
+ * Note that property value 0 (ie. no blob) is reserved for potential
+ * future use. Current userspace is expected to ignore the property
+ * if the value is 0, and fall back to some other means (eg.
+ * &DRM_CAP_CURSOR_WIDTH and &DRM_CAP_CURSOR_HEIGHT) to determine
+ * the appropriate plane size to use.
*/
static unsigned int drm_num_planes(struct drm_device *dev)
@@ -1729,3 +1748,40 @@ int drm_plane_create_scaling_filter_property(struct drm_plane *plane,
return 0;
}
EXPORT_SYMBOL(drm_plane_create_scaling_filter_property);
+
+/**
+ * drm_plane_add_size_hints_property - create a size hints property
+ *
+ * @plane: drm plane
+ * @hints: size hints
+ * @num_hints: number of size hints
+ *
+ * Create a size hints property for the plane.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int drm_plane_add_size_hints_property(struct drm_plane *plane,
+ const struct drm_plane_size_hint *hints,
+ int num_hints)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
+
+ /* extending to other plane types needs actual thought */
+ if (drm_WARN_ON(dev, plane->type != DRM_PLANE_TYPE_CURSOR))
+ return -EINVAL;
+
+ blob = drm_property_create_blob(dev,
+ array_size(sizeof(hints[0]), num_hints),
+ hints);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ drm_object_attach_property(&plane->base, config->size_hints_property,
+ blob->base.id);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_add_size_hints_property);
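A sketch of a driver advertising recommended cursor sizes; the values are illustrative:

	static const struct drm_plane_size_hint example_cursor_hints[] = {
		{ .width =  64, .height =  64 },
		{ .width = 128, .height = 128 },
		{ .width = 256, .height = 256 },
	};

	ret = drm_plane_add_size_hints_property(cursor_plane, example_cursor_hints,
						ARRAY_SIZE(example_cursor_hints));
	if (ret)
		return ret;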
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7352bde299d5..03bd3c7bd0dc 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -582,7 +582,12 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_gem_object *obj = dma_buf->priv;
- if (!obj->funcs->get_sg_table)
+ /*
+ * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
+ * that implement their own ->map_dma_buf() do not.
+ */
+ if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
+ !obj->funcs->get_sg_table)
return -ENOSYS;
return drm_gem_pin(obj);
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 699b7dbffd7b..cf2efb44722c 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -23,13 +23,13 @@
* Rob Clark <robdclark@gmail.com>
*/
-#include <linux/stdarg.h>
-
+#include <linux/debugfs.h>
+#include <linux/dynamic_debug.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/dynamic_debug.h>
+#include <linux/stdarg.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bf2dd1f46b6c..4f75a1cfd820 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -566,8 +567,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
drm_modeset_acquire_init(&ctx, 0);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
+ connector->name);
retry:
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
@@ -610,11 +611,10 @@ retry:
* check here, and if anything changed start the hotplug code.
*/
if (old_status != connector->status) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->status));
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id, connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
/*
* The hotplug event code might call into the fb
@@ -637,8 +637,8 @@ retry:
drm_kms_helper_poll_enable(dev);
if (connector->status == connector_status_disconnected) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] disconnected\n",
+ connector->base.id, connector->name);
drm_connector_update_edid_property(connector, NULL);
drm_mode_prune_invalid(dev, &connector->modes, false);
goto exit;
@@ -696,11 +696,13 @@ exit:
drm_mode_sort(&connector->modes);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] probed modes:\n",
+ connector->base.id, connector->name);
+
list_for_each_entry(mode, &connector->modes, head) {
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- drm_mode_debug_printmodeline(mode);
+ drm_dbg_kms(dev, "Probed mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(mode));
}
return count;
@@ -833,14 +835,12 @@ static void output_poll_execute(struct work_struct *work)
old = drm_get_connector_status_name(old_status);
new = drm_get_connector_status_name(connector->status);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
- "status updated from %s to %s\n",
- connector->base.id,
- connector->name,
- old, new);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
- connector->base.id, connector->name,
- old_epoch_counter, connector->epoch_counter);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id, connector->name,
+ old, new);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
+ connector->base.id, connector->name,
+ old_epoch_counter, connector->epoch_counter);
changed = true;
}
@@ -951,6 +951,32 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res)
+{
+ drm_kms_helper_poll_fini(dev);
+}
+
+/**
+ * drmm_kms_helper_poll_init - initialize and enable output polling
+ * @dev: drm_device
+ *
+ * This function initializes and then also enables output polling support for
+ * @dev similar to drm_kms_helper_poll_init(). Polling will automatically be
+ * cleaned up when the DRM device goes away.
+ *
+ * See drm_kms_helper_poll_init() for more information.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drmm_kms_helper_poll_init(struct drm_device *dev)
+{
+ drm_kms_helper_poll_init(dev);
+
+ return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+}
+EXPORT_SYMBOL(drmm_kms_helper_poll_init);
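In a driver's probe path this reduces to one managed call; a sketch:

	ret = drmm_kms_helper_poll_init(drm);
	if (ret)
		return ret;
	/* No explicit drm_kms_helper_poll_fini(); teardown is device-managed. */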
+
static bool check_connector_changed(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -1279,3 +1305,32 @@ int drm_connector_helper_tv_get_modes(struct drm_connector *connector)
return i;
}
EXPORT_SYMBOL(drm_connector_helper_tv_get_modes);
+
+/**
+ * drm_connector_helper_detect_from_ddc - Read EDID and detect connector status.
+ * @connector: The connector
+ * @ctx: Acquire context
+ * @force: Perform screen-destructive operations, if necessary
+ *
+ * Detects the connector status by probing for a DDC device with drm_probe_ddc(),
+ * which requires connector->ddc to be set. Returns connector_status_connected
+ * if a device responds, connector_status_disconnected if it does not, and
+ * connector_status_unknown if connector->ddc is not set.
+ *
+ * Returns:
+ * The connector status as defined by enum drm_connector_status.
+ */
+int drm_connector_helper_detect_from_ddc(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct i2c_adapter *ddc = connector->ddc;
+
+ if (!ddc)
+ return connector_status_unknown;
+
+ if (drm_probe_ddc(ddc))
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+EXPORT_SYMBOL(drm_connector_helper_detect_from_ddc);
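The helper slots directly into a connector's helper vtable, assuming connector->ddc was set at init time:

	static const struct drm_connector_helper_funcs example_connector_funcs = {
		.get_modes = drm_connector_helper_get_modes,
		.detect_ctx = drm_connector_helper_detect_from_ddc,
	};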
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index a953f69a34b6..bd9b8ab4f82b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -209,10 +209,9 @@ static ssize_t status_store(struct device *device,
ret = -EINVAL;
if (old_force != connector->force || !connector->force) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force updated from %d to %d or reprobing\n",
- connector->base.id,
- connector->name,
- old_force, connector->force);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] force updated from %d to %d or reprobing\n",
+ connector->base.id, connector->name,
+ old_force, connector->force);
connector->funcs->fill_modes(connector,
dev->mode_config.max_width,
@@ -383,8 +382,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
if (r)
goto err_free;
- DRM_DEBUG("adding \"%s\" to sysfs\n",
- connector->name);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] adding connector to sysfs\n",
+ connector->base.id, connector->name);
r = device_add(kdev);
if (r) {
@@ -430,8 +429,9 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
if (dev_fwnode(connector->kdev))
component_del(connector->kdev, &typec_connector_ops);
- DRM_DEBUG("removing \"%s\" from sysfs\n",
- connector->name);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] removing connector from sysfs\n",
+ connector->base.id, connector->name);
device_unregister(connector->kdev);
connector->kdev = NULL;
@@ -442,7 +442,7 @@ void drm_sysfs_lease_event(struct drm_device *dev)
char *event_string = "LEASE=1";
char *envp[] = { event_string, NULL };
- DRM_DEBUG("generating lease event\n");
+ drm_dbg_lease(dev, "generating lease event\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
@@ -463,7 +463,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
char *event_string = "HOTPLUG=1";
char *envp[] = { event_string, NULL };
- DRM_DEBUG("generating hotplug event\n");
+ drm_dbg_kms(dev, "generating hotplug event\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 702a12bc93bd..cc3571e25a9a 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -166,11 +166,24 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+static struct drm_vblank_crtc *
+drm_vblank_crtc(struct drm_device *dev, unsigned int pipe)
+{
+ return &dev->vblank[pipe];
+}
+
+struct drm_vblank_crtc *
+drm_crtc_vblank_crtc(struct drm_crtc *crtc)
+{
+ return drm_vblank_crtc(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_crtc);
+
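The new accessor replaces the open-coded array lookup that was duplicated
across this file, as the conversions below show:

	/* before */
	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];

	/* after */
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);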
static void store_vblank(struct drm_device *dev, unsigned int pipe,
u32 vblank_count_inc,
ktime_t t_vblank, u32 last)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
assert_spin_locked(&dev->vblank_time_lock);
@@ -184,7 +197,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
return vblank->max_vblank_count ?: dev->max_vblank_count;
}
@@ -273,7 +286,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
u32 cur_vblank, diff;
bool rc;
ktime_t t_vblank;
@@ -364,7 +377,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
u64 count;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
@@ -438,7 +451,7 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
*/
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
unsigned long irqflags;
assert_spin_locked(&dev->vbl_lock);
@@ -600,7 +613,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
int linedur_ns = 0, framedur_ns = 0;
int dotclock = mode->crtc_clock;
@@ -930,7 +943,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
ktime_t *vblanktime)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
u64 vblank_count;
unsigned int seq;
@@ -985,7 +998,6 @@ EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
*/
int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
{
- unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank;
struct drm_display_mode *mode;
u64 vblank_start;
@@ -993,7 +1005,7 @@ int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
if (!drm_dev_has_vblank(crtc->dev))
return -EINVAL;
- vblank = &crtc->dev->vblank[pipe];
+ vblank = drm_crtc_vblank_crtc(crtc);
mode = &vblank->hwmode;
if (!vblank->framedur_ns || !vblank->linedur_ns)
@@ -1147,7 +1159,7 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
int ret = 0;
assert_spin_locked(&dev->vbl_lock);
@@ -1185,7 +1197,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
unsigned long irqflags;
int ret = 0;
@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
@@ -1274,7 +1286,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
*/
void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
int ret;
u64 last;
@@ -1327,7 +1339,7 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct drm_pending_vblank_event *e, *t;
ktime_t now;
u64 seq;
@@ -1405,8 +1417,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
void drm_crtc_vblank_reset(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
spin_lock_irq(&dev->vbl_lock);
/*
@@ -1445,8 +1456,7 @@ void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count)
{
struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
drm_WARN_ON(dev, dev->max_vblank_count);
drm_WARN_ON(dev, !READ_ONCE(vblank->inmodeset));
@@ -1469,7 +1479,7 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
@@ -1512,7 +1522,7 @@ static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
assert_spin_locked(&dev->vbl_lock);
assert_spin_locked(&dev->vblank_time_lock);
- vblank = &dev->vblank[pipe];
+ vblank = drm_vblank_crtc(dev, pipe);
drm_WARN_ONCE(dev,
drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
"Cannot compute missed vblanks without frame duration\n");
@@ -1564,7 +1574,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
union drm_wait_vblank *vblwait,
struct drm_file *file_priv)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
struct drm_pending_vblank_event *e;
ktime_t now;
u64 seq;
@@ -1872,7 +1882,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
*/
bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
unsigned long irqflags;
bool disable_irq;
@@ -1981,7 +1991,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
pipe = drm_crtc_index(crtc);
- vblank = &dev->vblank[pipe];
+ vblank = drm_crtc_vblank_crtc(crtc);
vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled);
if (!vblank_enabled) {
@@ -2046,7 +2056,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
pipe = drm_crtc_index(crtc);
- vblank = &dev->vblank[pipe];
+ vblank = drm_crtc_vblank_crtc(crtc);
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 43cd5c0f4f6f..4fe9b1d3b00f 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -245,7 +245,7 @@ void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
{
kthread_init_work(&work->base, func);
INIT_LIST_HEAD(&work->node);
- work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
+ work->vblank = drm_crtc_vblank_crtc(crtc);
}
EXPORT_SYMBOL(drm_vblank_work_init);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 734412aae94d..a9bf426f69b3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -164,26 +164,6 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.eco_id;
break;
- case ETNAVIV_PARAM_GPU_NN_CORE_COUNT:
- *value = gpu->identity.nn_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE:
- *value = gpu->identity.nn_mad_per_core;
- break;
-
- case ETNAVIV_PARAM_GPU_TP_CORE_COUNT:
- *value = gpu->identity.tp_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE:
- *value = gpu->identity.on_chip_sram_size;
- break;
-
- case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE:
- *value = gpu->identity.axi_sram_size;
- break;
-
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
@@ -663,8 +643,8 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
/* Disable TX clock gating on affected core revisions. */
if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6203))
+ etnaviv_is_model_rev(gpu, GC7000, 0x6202) ||
+ etnaviv_is_model_rev(gpu, GC7000, 0x6203))
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
/* Disable SE and RA clock gating on affected core revisions. */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 7d5e9158e13c..197e0037732e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -54,18 +54,6 @@ struct etnaviv_chip_identity {
/* Number of Neural Network cores. */
u32 nn_core_count;
- /* Number of MAD units per Neural Network core. */
- u32 nn_mad_per_core;
-
- /* Number of Tensor Processing cores. */
- u32 tp_core_count;
-
- /* Size in bytes of the SRAM inside the NPU. */
- u32 on_chip_sram_size;
-
- /* Size in bytes of the SRAM across the AXI bus. */
- u32 axi_sram_size;
-
/* Size of the vertex cache. */
u32 vertex_cache_size;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index d8e7334de8ce..8665f2658d51 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -17,10 +17,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 128,
.shader_core_count = 1,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -52,11 +48,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 256,
.shader_core_count = 1,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 512,
.pixel_pipes = 1,
@@ -89,10 +80,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -125,10 +112,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -160,11 +143,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -197,10 +175,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 1024,
.shader_core_count = 4,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
@@ -233,10 +207,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 8,
- .nn_mad_per_core = 64,
- .tp_core_count = 4,
- .on_chip_sram_size = 524288,
- .axi_sram_size = 1048576,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -269,10 +239,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 6,
- .nn_mad_per_core = 64,
- .tp_core_count = 3,
- .on_chip_sram_size = 262144,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 733b109a5095..58cd77220741 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -4,7 +4,6 @@ config DRM_EXYNOS
depends on OF && DRM && COMMON_CLK
depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on MMU
- select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
@@ -68,8 +67,9 @@ config DRM_EXYNOS_DSI
config DRM_EXYNOS_DP
bool "Exynos specific extensions for Analogix DP driver"
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER=y || (DRM_DISPLAY_HELPER=m && DRM_EXYNOS=m)
select DRM_ANALOGIX_DP
- select DRM_DISPLAY_DP_HELPER
default DRM_EXYNOS
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 2fe0e5f3f638..bf16deaae68b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -184,7 +184,6 @@ struct platform_driver dsi_driver = {
.remove_new = samsung_dsim_remove,
.driver = {
.name = "exynos-dsi",
- .owner = THIS_MODULE,
.pm = &samsung_dsim_pm_ops,
.of_match_table = exynos_dsi_of_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index e81a576de398..142184c8c3bc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1412,7 +1412,6 @@ struct platform_driver fimc_driver = {
.driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
- .owner = THIS_MODULE,
.pm = pm_ptr(&fimc_pm_ops),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index f2145227a1e0..f57df8c48139 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1326,7 +1326,6 @@ struct platform_driver fimd_driver = {
.remove_new = fimd_remove,
.driver = {
.name = "exynos4-fb",
- .owner = THIS_MODULE,
.pm = pm_ptr(&exynos_fimd_pm_ops),
.of_match_table = fimd_driver_dt_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f3138423612e..3a3b2c00e400 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1610,7 +1610,6 @@ struct platform_driver g2d_driver = {
.remove_new = g2d_remove,
.driver = {
.name = "exynos-drm-g2d",
- .owner = THIS_MODULE,
.pm = pm_ptr(&g2d_pm_ops),
.of_match_table = exynos_g2d_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 180507a47700..1b111e2c3347 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1423,7 +1423,6 @@ struct platform_driver gsc_driver = {
.remove_new = gsc_remove,
.driver = {
.name = "exynos-drm-gsc",
- .owner = THIS_MODULE,
.pm = &gsc_pm_ops,
.of_match_table = exynos_drm_gsc_of_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index e2920960180f..d61ec451807c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -464,7 +464,6 @@ struct platform_driver mic_driver = {
.driver = {
.name = "exynos-mic",
.pm = pm_ptr(&exynos_mic_pm_ops),
- .owner = THIS_MODULE,
.of_match_table = exynos_mic_of_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 5f7516655b08..2eb0b701672f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -454,7 +454,6 @@ struct platform_driver rotator_driver = {
.remove_new = rotator_remove,
.driver = {
.name = "exynos-rotator",
- .owner = THIS_MODULE,
.pm = pm_ptr(&rotator_pm_ops),
.of_match_table = exynos_rotator_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 392f721f13ab..a9d469896824 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -722,7 +722,6 @@ struct platform_driver scaler_driver = {
.remove_new = scaler_remove,
.driver = {
.name = "exynos-scaler",
- .owner = THIS_MODULE,
.pm = pm_ptr(&scaler_pm_ops),
.of_match_table = exynos_scaler_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index f5bbba9ad225..fab135308b70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -479,7 +479,6 @@ struct platform_driver vidi_driver = {
.remove_new = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
- .owner = THIS_MODULE,
.dev_groups = vidi_groups,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index b1d02dec3774..e968824a4c72 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1919,10 +1919,9 @@ static int hdmi_get_ddc_adapter(struct hdmi_context *hdata)
static int hdmi_get_phy_io(struct hdmi_context *hdata)
{
const char *compatible_str = "samsung,exynos4212-hdmiphy";
- struct device_node *np;
- int ret = 0;
+ struct device_node *np __free(device_node) =
+ of_find_compatible_node(NULL, NULL, compatible_str);
- np = of_find_compatible_node(NULL, NULL, compatible_str);
if (!np) {
np = of_parse_phandle(hdata->dev->of_node, "phy", 0);
if (!np) {
@@ -1937,21 +1936,17 @@ static int hdmi_get_phy_io(struct hdmi_context *hdata)
if (!hdata->regs_hdmiphy) {
DRM_DEV_ERROR(hdata->dev,
"failed to ioremap hdmi phy\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
} else {
hdata->hdmiphy_port = of_find_i2c_device_by_node(np);
if (!hdata->hdmiphy_port) {
DRM_INFO("Failed to get hdmi phy i2c client\n");
- ret = -EPROBE_DEFER;
- goto out;
+ return -EPROBE_DEFER;
}
}
-out:
- of_node_put(np);
- return ret;
+ return 0;
}
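For reference: __free(device_node) arranges for of_node_put() to run on
whatever np holds when it goes out of scope, on every return path, which is
why the out: label and the manual of_node_put() above could be dropped. It
also covers the case where np is reassigned from of_parse_phandle(). A
minimal sketch (the compatible string is hypothetical):

	struct device_node *np __free(device_node) =
		of_find_compatible_node(NULL, NULL, "vendor,example-phy");
	if (!np)
		return -ENODEV;
	/* use np; of_node_put() happens automatically at scope exit */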
static int hdmi_probe(struct platform_device *pdev)
@@ -2126,7 +2121,6 @@ struct platform_driver hdmi_driver = {
.remove_new = hdmi_remove,
.driver = {
.name = "exynos-hdmi",
- .owner = THIS_MODULE,
.pm = &exynos_hdmi_pm_ops,
.of_match_table = hdmi_match_types,
},
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6822333fd0e6..1db955f00044 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1331,7 +1331,6 @@ static const struct dev_pm_ops exynos_mixer_pm_ops = {
struct platform_driver mixer_driver = {
.driver = {
.name = "exynos-mixer",
- .owner = THIS_MODULE,
.pm = &exynos_mixer_pm_ops,
.of_match_table = mixer_match_types,
},
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 4f302cd5e1a6..58fed80c7392 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -34,7 +34,6 @@ gma500_gfx-y += \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
- psb_lid.o \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index d974d0c60d2a..72191d6f0d06 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -11,8 +11,6 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
-#include <asm/intel-mid.h>
-
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index dcfcd7b89d4a..6dece8f0e380 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
}
psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
- /* This must occur after the backlight is properly initialised */
- psb_lid_timer_init(dev_priv);
+
return 0;
}
@@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index c5edfa4aa4cc..83c17689c454 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -162,7 +162,6 @@
#define PSB_NUM_VBLANKS 2
#define PSB_WATCHDOG_DELAY (HZ * 2)
-#define PSB_LID_DELAY (HZ / 10)
#define PSB_MAX_BRIGHTNESS 100
@@ -491,11 +490,7 @@ struct drm_psb_private {
/* Hotplug handling */
struct work_struct hotplug_work;
- /* LID-Switch */
- spinlock_t lid_lock;
- struct timer_list lid_timer;
struct psb_intel_opregion opregion;
- u32 lid_last_state;
/* Watchdog */
uint32_t apm_reg;
@@ -591,10 +586,6 @@ struct psb_ops {
int i2c_bus; /* I2C bus identifier for Moorestown */
};
-/* psb_lid.c */
-extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
-extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
-
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
deleted file mode 100644
index 58a7fe392636..000000000000
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
- *
- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- **************************************************************************/
-
-#include <linux/spinlock.h>
-
-#include "psb_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_reg.h"
-
-static void psb_lid_timer_func(struct timer_list *t)
-{
- struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
- struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
- u32 __iomem *lid_state = dev_priv->opregion.lid_state;
- u32 pp_status;
-
- if (readl(lid_state) == dev_priv->lid_last_state)
- goto lid_timer_schedule;
-
- if ((readl(lid_state)) & 0x01) {
- /*lid state is open*/
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0 &&
- (pp_status & PP_SEQUENCE_MASK) != 0);
-
- if (REG_READ(PP_STATUS) & PP_ON) {
- /*FIXME: should be backlight level before*/
- psb_intel_lvds_set_brightness(dev, 100);
- } else {
- DRM_DEBUG("LVDS panel never powered up");
- return;
- }
- } else {
- psb_intel_lvds_set_brightness(dev, 0);
-
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
- }
- dev_priv->lid_last_state = readl(lid_state);
-
-lid_timer_schedule:
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
- if (!timer_pending(lid_timer)) {
- lid_timer->expires = jiffies + PSB_LID_DELAY;
- add_timer(lid_timer);
- }
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_init(struct drm_psb_private *dev_priv)
-{
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
-
- spin_lock_init(&dev_priv->lid_lock);
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
-
- timer_setup(lid_timer, psb_lid_timer_func, 0);
-
- lid_timer->expires = jiffies + PSB_LID_DELAY;
-
- add_timer(lid_timer);
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
-{
- del_timer_sync(&dev_priv->lid_timer);
-}
-
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index 034e78360d4f..0f07d77c5d52 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -221,7 +221,7 @@ static int gud_connector_get_modes(struct drm_connector *connector)
struct gud_display_mode_req *reqmodes = NULL;
struct gud_connector_get_edid_ctx edid_ctx;
unsigned int i, num_modes = 0;
- struct edid *edid = NULL;
+ const struct drm_edid *drm_edid = NULL;
int idx, ret;
if (!drm_dev_enter(connector->dev, &idx))
@@ -238,13 +238,13 @@ static int gud_connector_get_modes(struct drm_connector *connector)
gud_conn_err(connector, "Invalid EDID size", ret);
} else if (ret > 0) {
edid_ctx.len = ret;
- edid = drm_do_get_edid(connector, gud_connector_get_edid_block, &edid_ctx);
+ drm_edid = drm_edid_read_custom(connector, gud_connector_get_edid_block, &edid_ctx);
}
kfree(edid_ctx.buf);
- drm_connector_update_edid_property(connector, edid);
+ drm_edid_connector_update(connector, drm_edid);
- if (edid && edid_ctx.edid_override)
+ if (drm_edid && edid_ctx.edid_override)
goto out;
reqmodes = kmalloc_array(GUD_CONNECTOR_MAX_NUM_MODES, sizeof(*reqmodes), GFP_KERNEL);
@@ -276,10 +276,10 @@ static int gud_connector_get_modes(struct drm_connector *connector)
}
out:
if (!num_modes)
- num_modes = drm_add_edid_modes(connector, edid);
+ num_modes = drm_edid_connector_add_modes(connector);
kfree(reqmodes);
- kfree(edid);
+ drm_edid_free(drm_edid);
drm_dev_exit(idx);
return num_modes;
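The hunk above follows the usual migration pattern from the raw struct edid
API to the opaque struct drm_edid API (a sketch; read_block_fn and ctx are
placeholders for the driver's EDID block reader):

	const struct drm_edid *drm_edid;
	int count;

	drm_edid = drm_edid_read_custom(connector, read_block_fn, ctx);
	drm_edid_connector_update(connector, drm_edid);	/* EDID property etc. */
	count = drm_edid_connector_add_modes(connector);
	drm_edid_free(drm_edid);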
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 5932024f8f95..4f0d18a16b0f 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -2,6 +2,10 @@
config DRM_I915
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HDCP_HELPER
+ depends on DRM_DISPLAY_HDMI_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on X86 && PCI
depends on !PREEMPT_RT
select INTEL_GTT if X86
@@ -10,10 +14,6 @@ config DRM_I915
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDCP_HELPER
- select DRM_DISPLAY_HDMI_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index bc18e2d9ea05..d8397065c3f0 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -27,8 +27,8 @@ config DRM_I915_DEBUG
select REF_TRACKER
select STACKDEPOT
select STACKTRACE
- select DRM_DP_AUX_CHARDEV
- select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL
+ select DRM_DISPLAY_DP_AUX_CHARDEV
+ select DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG if DRM_I915_DP_TUNNEL
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3ef6ed41e62b..7cad944b825c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,11 +32,6 @@ endif
# Enable -Werror in CI and development
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
-# Fine grained warnings disable
-CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
-
# Support compiling the display code separately for both i915 and xe
# drivers. Define I915 when building i915.
subdir-ccflags-y += -DI915
@@ -118,6 +113,7 @@ gt-y += \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
gt/intel_gt_buffer_pool.o \
+ gt/intel_gt_ccs_mode.o \
gt/intel_gt_clock_utils.o \
gt/intel_gt_debugfs.o \
gt/intel_gt_engines_debugfs.o \
@@ -270,6 +266,7 @@ i915-y += \
display/intel_display_rps.o \
display/intel_display_wa.o \
display/intel_dmc.o \
+ display/intel_dmc_wl.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
diff --git a/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h b/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h
new file mode 100644
index 000000000000..275f4d9c3fb0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __BXT_DPIO_PHY_REGS_H__
+#define __BXT_DPIO_PHY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* BXT PHY registers */
+#define _BXT_PHY0_BASE 0x6C000
+#define _BXT_PHY1_BASE 0x162000
+#define _BXT_PHY2_BASE 0x163000
+#define BXT_PHY_BASE(phy) \
+ _PICK_EVEN_2RANGES(phy, 1, \
+ _BXT_PHY0_BASE, _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE, _BXT_PHY2_BASE)
+
+#define _BXT_PHY(phy, reg) \
+ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
+
+#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
+ (reg_ch1) - _BXT_PHY0_BASE))
+#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
+#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
+ ((lane) & 1) * 0x80)
+#define _MMIO_BXT_PHY_CH_LN(phy, ch, lane, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) + _BXT_LANE_OFFSET(lane))
+
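Worked values for the lane offset above: lanes are laid out in pairs 0x200
apart, with the odd lane of each pair a further 0x80 in:

	/* _BXT_LANE_OFFSET(0) = 0x000, (1) = 0x080, (2) = 0x200, (3) = 0x280 */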
+/* BXT PHY PLL registers */
+#define _PORT_PLL_A 0x46074
+#define _PORT_PLL_B 0x46078
+#define _PORT_PLL_C 0x4607c
+#define PORT_PLL_ENABLE REG_BIT(31)
+#define PORT_PLL_LOCK REG_BIT(30)
+#define PORT_PLL_REF_SEL REG_BIT(27)
+#define PORT_PLL_POWER_ENABLE REG_BIT(26)
+#define PORT_PLL_POWER_STATE REG_BIT(25)
+#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
+
+#define _PORT_PLL_EBB_0_A 0x162034
+#define _PORT_PLL_EBB_0_B 0x6C034
+#define _PORT_PLL_EBB_0_C 0x6C340
+#define PORT_PLL_P1_MASK REG_GENMASK(15, 13)
+#define PORT_PLL_P1(p1) REG_FIELD_PREP(PORT_PLL_P1_MASK, (p1))
+#define PORT_PLL_P2_MASK REG_GENMASK(12, 8)
+#define PORT_PLL_P2(p2) REG_FIELD_PREP(PORT_PLL_P2_MASK, (p2))
+#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_0_B, \
+ _PORT_PLL_EBB_0_C)
+
+#define _PORT_PLL_EBB_4_A 0x162038
+#define _PORT_PLL_EBB_4_B 0x6C038
+#define _PORT_PLL_EBB_4_C 0x6C344
+#define PORT_PLL_RECALIBRATE REG_BIT(14)
+#define PORT_PLL_10BIT_CLK_ENABLE REG_BIT(13)
+#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_4_B, \
+ _PORT_PLL_EBB_4_C)
+
+#define _PORT_PLL_0_A 0x162100
+#define _PORT_PLL_0_B 0x6C100
+#define _PORT_PLL_0_C 0x6C380
+/* PORT_PLL_0_A */
+#define PORT_PLL_M2_INT_MASK REG_GENMASK(7, 0)
+#define PORT_PLL_M2_INT(m2_int) REG_FIELD_PREP(PORT_PLL_M2_INT_MASK, (m2_int))
+/* PORT_PLL_1_A */
+#define PORT_PLL_N_MASK REG_GENMASK(11, 8)
+#define PORT_PLL_N(n) REG_FIELD_PREP(PORT_PLL_N_MASK, (n))
+/* PORT_PLL_2_A */
+#define PORT_PLL_M2_FRAC_MASK REG_GENMASK(21, 0)
+#define PORT_PLL_M2_FRAC(m2_frac) REG_FIELD_PREP(PORT_PLL_M2_FRAC_MASK, (m2_frac))
+/* PORT_PLL_3_A */
+#define PORT_PLL_M2_FRAC_ENABLE REG_BIT(16)
+/* PORT_PLL_6_A */
+#define PORT_PLL_GAIN_CTL_MASK REG_GENMASK(18, 16)
+#define PORT_PLL_GAIN_CTL(x) REG_FIELD_PREP(PORT_PLL_GAIN_CTL_MASK, (x))
+#define PORT_PLL_INT_COEFF_MASK REG_GENMASK(12, 8)
+#define PORT_PLL_INT_COEFF(x) REG_FIELD_PREP(PORT_PLL_INT_COEFF_MASK, (x))
+#define PORT_PLL_PROP_COEFF_MASK REG_GENMASK(3, 0)
+#define PORT_PLL_PROP_COEFF(x) REG_FIELD_PREP(PORT_PLL_PROP_COEFF_MASK, (x))
+/* PORT_PLL_8_A */
+#define PORT_PLL_TARGET_CNT_MASK REG_GENMASK(9, 0)
+#define PORT_PLL_TARGET_CNT(x) REG_FIELD_PREP(PORT_PLL_TARGET_CNT_MASK, (x))
+/* PORT_PLL_9_A */
+#define PORT_PLL_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
+#define PORT_PLL_LOCK_THRESHOLD(x) REG_FIELD_PREP(PORT_PLL_LOCK_THRESHOLD_MASK, (x))
+/* PORT_PLL_10_A */
+#define PORT_PLL_DCO_AMP_OVR_EN_H REG_BIT(27)
+#define PORT_PLL_DCO_AMP_MASK REG_GENMASK(13, 10)
+#define PORT_PLL_DCO_AMP(x) REG_FIELD_PREP(PORT_PLL_DCO_AMP_MASK, (x))
+#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_0_B, \
+ _PORT_PLL_0_C)
+#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
+ (idx) * 4)
+
+/* BXT PHY common lane registers */
+#define _PORT_CL1CM_DW0_A 0x162000
+#define _PORT_CL1CM_DW0_BC 0x6C000
+#define PHY_POWER_GOOD REG_BIT(16)
+#define PHY_RESERVED REG_BIT(7)
+#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
+
+#define _PORT_CL1CM_DW9_A 0x162024
+#define _PORT_CL1CM_DW9_BC 0x6C024
+#define IREF0RC_OFFSET_MASK REG_GENMASK(15, 8)
+#define IREF0RC_OFFSET(x) REG_FIELD_PREP(IREF0RC_OFFSET_MASK, (x))
+#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
+
+#define _PORT_CL1CM_DW10_A 0x162028
+#define _PORT_CL1CM_DW10_BC 0x6C028
+#define IREF1RC_OFFSET_MASK REG_GENMASK(15, 8)
+#define IREF1RC_OFFSET(x) REG_FIELD_PREP(IREF1RC_OFFSET_MASK, (x))
+#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
+
+#define _PORT_CL1CM_DW28_A 0x162070
+#define _PORT_CL1CM_DW28_BC 0x6C070
+#define OCL1_POWER_DOWN_EN REG_BIT(23)
+#define DW28_OLDO_DYN_PWR_DOWN_EN REG_BIT(22)
+#define SUS_CLK_CONFIG REG_GENMASK(1, 0)
+#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
+
+#define _PORT_CL1CM_DW30_A 0x162078
+#define _PORT_CL1CM_DW30_BC 0x6C078
+#define OCL2_LDOFUSE_PWR_DIS REG_BIT(6)
+#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
+
+/* The spec defines this only for BXT PHY0, but let's assume that this
+ * would exist for PHY1 too if it had a second channel.
+ */
+#define _PORT_CL2CM_DW6_A 0x162358
+#define _PORT_CL2CM_DW6_BC 0x6C358
+#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
+#define DW6_OLDO_DYN_PWR_DOWN_EN REG_BIT(28)
+
+/* BXT PHY Ref registers */
+#define _PORT_REF_DW3_A 0x16218C
+#define _PORT_REF_DW3_BC 0x6C18C
+#define GRC_DONE REG_BIT(22)
+#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
+
+#define _PORT_REF_DW6_A 0x162198
+#define _PORT_REF_DW6_BC 0x6C198
+#define GRC_CODE_MASK REG_GENMASK(31, 24)
+#define GRC_CODE(x) REG_FIELD_PREP(GRC_CODE_MASK, (x))
+#define GRC_CODE_FAST_MASK REG_GENMASK(23, 16)
+#define GRC_CODE_FAST(x) REG_FIELD_PREP(GRC_CODE_FAST_MASK, (x))
+#define GRC_CODE_SLOW_MASK REG_GENMASK(15, 8)
+#define GRC_CODE_SLOW(x) REG_FIELD_PREP(GRC_CODE_SLOW_MASK, (x))
+#define GRC_CODE_NOM_MASK REG_GENMASK(7, 0)
+#define GRC_CODE_NOM(x) REG_FIELD_PREP(GRC_CODE_NOM_MASK, (x))
+#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
+
+#define _PORT_REF_DW8_A 0x1621A0
+#define _PORT_REF_DW8_BC 0x6C1A0
+#define GRC_DIS REG_BIT(15)
+#define GRC_RDY_OVRD REG_BIT(1)
+#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
+
+/* BXT PHY PCS registers */
+#define _PORT_PCS_DW10_LN01_A 0x162428
+#define _PORT_PCS_DW10_LN01_B 0x6C428
+#define _PORT_PCS_DW10_LN01_C 0x6C828
+#define _PORT_PCS_DW10_GRP_A 0x162C28
+#define _PORT_PCS_DW10_GRP_B 0x6CC28
+#define _PORT_PCS_DW10_GRP_C 0x6CE28
+#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_LN01_B, \
+ _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_GRP_B, \
+ _PORT_PCS_DW10_GRP_C)
+
+#define TX2_SWING_CALC_INIT REG_BIT(31)
+#define TX1_SWING_CALC_INIT REG_BIT(30)
+
+#define _PORT_PCS_DW12_LN01_A 0x162430
+#define _PORT_PCS_DW12_LN01_B 0x6C430
+#define _PORT_PCS_DW12_LN01_C 0x6C830
+#define _PORT_PCS_DW12_LN23_A 0x162630
+#define _PORT_PCS_DW12_LN23_B 0x6C630
+#define _PORT_PCS_DW12_LN23_C 0x6CA30
+#define _PORT_PCS_DW12_GRP_A 0x162c30
+#define _PORT_PCS_DW12_GRP_B 0x6CC30
+#define _PORT_PCS_DW12_GRP_C 0x6CE30
+#define LANESTAGGER_STRAP_OVRD REG_BIT(6)
+#define LANE_STAGGER_MASK REG_GENMASK(4, 0)
+#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN01_B, \
+ _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN23_B, \
+ _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_GRP_B, \
+ _PORT_PCS_DW12_GRP_C)
+
+/* BXT PHY TX registers */
+#define _PORT_TX_DW2_LN0_A 0x162508
+#define _PORT_TX_DW2_LN0_B 0x6C508
+#define _PORT_TX_DW2_LN0_C 0x6C908
+#define _PORT_TX_DW2_GRP_A 0x162D08
+#define _PORT_TX_DW2_GRP_B 0x6CD08
+#define _PORT_TX_DW2_GRP_C 0x6CF08
+#define BXT_PORT_TX_DW2_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW2_LN0_B, \
+ _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_GRP_B, \
+ _PORT_TX_DW2_GRP_C)
+#define MARGIN_000_MASK REG_GENMASK(23, 16)
+#define MARGIN_000(x) REG_FIELD_PREP(MARGIN_000_MASK, (x))
+#define UNIQ_TRANS_SCALE_MASK REG_GENMASK(15, 8)
+#define UNIQ_TRANS_SCALE(x) REG_FIELD_PREP(UNIQ_TRANS_SCALE_MASK, (x))
+
+#define _PORT_TX_DW3_LN0_A 0x16250C
+#define _PORT_TX_DW3_LN0_B 0x6C50C
+#define _PORT_TX_DW3_LN0_C 0x6C90C
+#define _PORT_TX_DW3_GRP_A 0x162D0C
+#define _PORT_TX_DW3_GRP_B 0x6CD0C
+#define _PORT_TX_DW3_GRP_C 0x6CF0C
+#define BXT_PORT_TX_DW3_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW3_LN0_B, \
+ _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_GRP_B, \
+ _PORT_TX_DW3_GRP_C)
+#define SCALE_DCOMP_METHOD REG_BIT(26)
+#define UNIQUE_TRANGE_EN_METHOD REG_BIT(27)
+
+#define _PORT_TX_DW4_LN0_A 0x162510
+#define _PORT_TX_DW4_LN0_B 0x6C510
+#define _PORT_TX_DW4_LN0_C 0x6C910
+#define _PORT_TX_DW4_GRP_A 0x162D10
+#define _PORT_TX_DW4_GRP_B 0x6CD10
+#define _PORT_TX_DW4_GRP_C 0x6CF10
+#define BXT_PORT_TX_DW4_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW4_LN0_B, \
+ _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_GRP_B, \
+ _PORT_TX_DW4_GRP_C)
+#define DE_EMPHASIS_MASK REG_GENMASK(31, 24)
+#define DE_EMPHASIS(x) REG_FIELD_PREP(DE_EMPHASIS_MASK, (x))
+
+#define _PORT_TX_DW5_LN0_A 0x162514
+#define _PORT_TX_DW5_LN0_B 0x6C514
+#define _PORT_TX_DW5_LN0_C 0x6C914
+#define _PORT_TX_DW5_GRP_A 0x162D14
+#define _PORT_TX_DW5_GRP_B 0x6CD14
+#define _PORT_TX_DW5_GRP_C 0x6CF14
+#define BXT_PORT_TX_DW5_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW5_LN0_B, \
+ _PORT_TX_DW5_LN0_C)
+#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW5_GRP_B, \
+ _PORT_TX_DW5_GRP_C)
+#define DCC_DELAY_RANGE_1 REG_BIT(9)
+#define DCC_DELAY_RANGE_2 REG_BIT(8)
+
+#define _PORT_TX_DW14_LN0_A 0x162538
+#define _PORT_TX_DW14_LN0_B 0x6C538
+#define _PORT_TX_DW14_LN0_C 0x6C938
+#define LATENCY_OPTIM REG_BIT(30)
+#define BXT_PORT_TX_DW14_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW14_LN0_B, \
+ _PORT_TX_DW14_LN0_C)
+
+#endif /* __BXT_DPIO_PHY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index dfe0b07a122d..06ec04e667e3 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -717,7 +717,6 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_atomic_state *state,
@@ -726,7 +725,6 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_edp_backlight_on(pipe_config, conn_state);
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index eda4a8b88590..79ecfc339430 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1155,7 +1155,6 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
@@ -1256,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
/* step7: enable backlight */
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
@@ -1615,8 +1616,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
- base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 2bb270f82932..7a77ae3dc394 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -62,7 +62,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_digital_connector_state *intel_conn_state =
+ const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
if (property == dev_priv->display.properties.force_audio)
diff --git a/drivers/gpu/drm/i915/display/intel_audio_regs.h b/drivers/gpu/drm/i915/display/intel_audio_regs.h
index 616e7b1275c4..88ea2740365d 100644
--- a/drivers/gpu/drm/i915/display/intel_audio_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_audio_regs.h
@@ -148,4 +148,20 @@
#define HBLANK_START_COUNT_96 4
#define HBLANK_START_COUNT_128 5
+/* LPE Audio */
+#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
+#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
+
+#define VLV_AUD_CHICKEN_BIT_REG _MMIO(VLV_DISPLAY_BASE + 0x62F38)
+#define VLV_CHICKEN_BIT_DBG_ENABLE (1 << 0)
+
+#define _VLV_AUD_PORT_EN_B_DBG 0x62F20
+#define _VLV_AUD_PORT_EN_C_DBG 0x62F30
+#define _VLV_AUD_PORT_EN_D_DBG 0x62F34
+#define VLV_AUD_PORT_EN_DBG(port) _MMIO_BASE_PORT3(VLV_DISPLAY_BASE, (port) - PORT_B, \
+ _VLV_AUD_PORT_EN_B_DBG, \
+ _VLV_AUD_PORT_EN_C_DBG, \
+ _VLV_AUD_PORT_EN_D_DBG)
+#define VLV_AMP_MUTE (1 << 1)
+
#endif /* __INTEL_AUDIO_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 1946d7fb3c2e..071668bfe5d1 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -83,16 +83,16 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0);
- if (i915->display.params.invert_brightness < 0)
+ if (display->params.invert_brightness < 0)
return val;
- if (i915->display.params.invert_brightness > 0 ||
- intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) {
+ if (display->params.invert_brightness > 0 ||
+ intel_has_quirk(display, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -126,15 +126,15 @@ u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
- if (i915->display.params.invert_brightness > 0 ||
- (i915->display.params.invert_brightness == 0 &&
- intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
+ if (display->params.invert_brightness > 0 ||
+ (display->params.invert_brightness == 0 &&
+ intel_has_quirk(display, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -761,8 +761,8 @@ static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
WARN_ON(panel->backlight.max == 0);
- if (panel->backlight.level <= panel->backlight.min) {
- panel->backlight.level = panel->backlight.max;
+ if (panel->backlight.level < panel->backlight.min) {
+ panel->backlight.level = panel->backlight.min;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
scale_hw_to_user(connector,
@@ -949,7 +949,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
else
props.power = FB_BLANK_POWERDOWN;
- name = kstrdup("intel_backlight", GFP_KERNEL);
+ name = kstrdup_const("intel_backlight", GFP_KERNEL);
if (!name)
return -ENOMEM;
@@ -963,7 +963,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
* compatibility. Use unique names for subsequent backlight devices as a
* fallback when the default name already exists.
*/
- kfree(name);
+ kfree_const(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
i915->drm.primary->index, connector->base.name);
if (!name)
@@ -987,7 +987,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
connector->base.base.id, connector->base.name, name);
out:
- kfree(name);
+ kfree_const(name);
return ret;
}
@@ -1642,17 +1642,17 @@ void intel_backlight_update(struct intel_atomic_state *state,
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
int ret;
if (!connector->panel.vbt.backlight.present) {
- if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) {
- drm_dbg_kms(&i915->drm,
+ if (intel_has_quirk(display, QUIRK_BACKLIGHT_PRESENT)) {
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n",
connector->base.base.id, connector->base.name);
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] no backlight present per VBT\n",
connector->base.base.id, connector->base.name);
return 0;
@@ -1660,16 +1660,16 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
}
/* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&i915->drm, !panel->backlight.funcs))
+ if (drm_WARN_ON(display->drm, !panel->backlight.funcs))
return -ENODEV;
/* set level and max in panel struct */
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
ret = panel->backlight.funcs->setup(connector, pipe);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] failed to setup backlight\n",
connector->base.base.id, connector->base.name);
return ret;
@@ -1677,7 +1677,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
panel->backlight.present = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n",
connector->base.base.id, connector->base.name,
str_enabled_disabled(panel->backlight.enabled),
@@ -1821,7 +1821,7 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (intel_dp_aux_init_backlight_funcs(connector) == 0)
return;
- if (!intel_has_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ if (!intel_has_quirk(&i915->display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
connector->panel.backlight.power = intel_pps_backlight_power;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index fe52c06271ef..661842a3c2e6 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,6 +25,8 @@
*
*/
+#include <linux/firmware.h>
+
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_edid.h>
@@ -593,11 +595,14 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data,
return (const void *)data + ptrs->ptr[index].fp_timing.offset;
}
-static const struct lvds_pnp_id *
+static const struct drm_edid_product_id *
get_lvds_pnp_id(const struct bdb_lvds_lfp_data *data,
const struct bdb_lvds_lfp_data_ptrs *ptrs,
int index)
{
+ /* These two are supposed to have the same layout in memory. */
+ BUILD_BUG_ON(sizeof(struct lvds_pnp_id) != sizeof(struct drm_edid_product_id));
+
return (const void *)data + ptrs->ptr[index].panel_pnp_id.offset;
}
@@ -611,19 +616,6 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
return NULL;
}
-static void dump_pnp_id(struct drm_i915_private *i915,
- const struct lvds_pnp_id *pnp_id,
- const char *name)
-{
- u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name);
- char vend[4];
-
- drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n",
- name, drm_edid_decode_mfg_id(mfg_name, vend),
- pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial,
- pnp_id->mfg_week, pnp_id->mfg_year + 1990);
-}
-
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
@@ -662,21 +654,21 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
{
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
- const struct lvds_pnp_id *edid_id;
- struct lvds_pnp_id edid_id_nodate;
- const struct edid *edid = drm_edid_raw(drm_edid); /* FIXME */
+ struct drm_edid_product_id product_id, product_id_nodate;
+ struct drm_printer p;
int i, best = -1;
- if (!edid)
+ if (!drm_edid)
return -1;
- edid_id = (const void *)&edid->mfg_id[0];
+ drm_edid_get_product_id(drm_edid, &product_id);
- edid_id_nodate = *edid_id;
- edid_id_nodate.mfg_week = 0;
- edid_id_nodate.mfg_year = 0;
+ product_id_nodate = product_id;
+ product_id_nodate.week_of_manufacture = 0;
+ product_id_nodate.year_of_manufacture = 0;
- dump_pnp_id(i915, edid_id, "EDID");
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "EDID");
+ drm_edid_print_product_id(&p, &product_id, true);
ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
@@ -687,11 +679,11 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
return -1;
for (i = 0; i < 16; i++) {
- const struct lvds_pnp_id *vbt_id =
+ const struct drm_edid_product_id *vbt_id =
get_lvds_pnp_id(data, ptrs, i);
/* full match? */
- if (!memcmp(vbt_id, edid_id, sizeof(*vbt_id)))
+ if (!memcmp(vbt_id, &product_id, sizeof(*vbt_id)))
return i;
/*
@@ -699,7 +691,7 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
* and the VBT entry does not specify a date.
*/
if (best < 0 &&
- !memcmp(vbt_id, &edid_id_nodate, sizeof(*vbt_id)))
+ !memcmp(vbt_id, &product_id_nodate, sizeof(*vbt_id)))
best = i;
}
@@ -885,7 +877,8 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_tail *tail;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
- const struct lvds_pnp_id *pnp_id;
+ const struct drm_edid_product_id *pnp_id;
+ struct drm_printer p;
int panel_type = panel->vbt.panel_type;
ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
@@ -900,7 +893,9 @@ parse_lfp_data(struct drm_i915_private *i915,
parse_lfp_panel_dtd(i915, panel, data, ptrs);
pnp_id = get_lvds_pnp_id(data, ptrs, panel_type);
- dump_pnp_id(i915, pnp_id, "Panel");
+
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "Panel");
+ drm_edid_print_product_id(&p, pnp_id, false);
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
@@ -1955,16 +1950,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
- struct intel_panel *panel)
+static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
u8 *init_otp;
int len;
- /* Limit this to VLV for now. */
- if (!IS_VALLEYVIEW(i915))
- return;
-
/* Limit this to v1 vid-mode sequences */
if (panel->vbt.dsi.config->is_cmd_mode ||
panel->vbt.dsi.seq_version != 1)
@@ -2000,6 +1991,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}
+/*
+ * Some machines (eg. Lenovo 82TQ) appear to have broken
+ * VBT sequences:
+ * - INIT_OTP is not present at all
+ * - what should be in INIT_OTP is in DISPLAY_ON
+ * - what should be in DISPLAY_ON is in BACKLIGHT_ON
+ * (along with the actual backlight stuff)
+ *
+ * To make those work we simply swap DISPLAY_ON and INIT_OTP.
+ *
+ * TODO: Do we need to limit this to specific machines,
+ * or examine the contents of the sequences to
+ * avoid false positives?
+ */
+static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
+ drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+
+ swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
+ }
+}
+
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (DISPLAY_VER(i915) >= 11)
+ icl_fixup_mipi_sequences(i915, panel);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_fixup_mipi_sequences(i915, panel);
+}
+
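/*
 * Illustrative example (hypothetical slot contents, not from a real VBT
 * dump): on an affected machine the sequence table looks like
 *
 *   MIPI_SEQ_INIT_OTP     = NULL
 *   MIPI_SEQ_DISPLAY_ON   = <panel init OTP commands>
 *   MIPI_SEQ_BACKLIGHT_ON = <display-on + backlight commands>
 *
 * and after icl_fixup_mipi_sequences() the first two slots are swapped,
 * so the panel init commands run from the INIT_OTP slot as intended;
 * BACKLIGHT_ON is left untouched.
 */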
static void
parse_mipi_sequence(struct drm_i915_private *i915,
struct intel_panel *panel)
@@ -2699,6 +2725,57 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
print_ddi_port(devdata);
}
+static int child_device_expected_size(u16 version)
+{
+ BUILD_BUG_ON(sizeof(struct child_device_config) < 40);
+
+ if (version > 256)
+ return -ENOENT;
+ else if (version >= 256)
+ return 40;
+ else if (version >= 216)
+ return 39;
+ else if (version >= 196)
+ return 38;
+ else if (version >= 195)
+ return 37;
+ else if (version >= 111)
+ return LEGACY_CHILD_DEVICE_CONFIG_SIZE;
+ else if (version >= 106)
+ return 27;
+ else
+ return 22;
+}
+
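/*
 * Quick reference, derived from the mapping above
 * (LEGACY_CHILD_DEVICE_CONFIG_SIZE is the pre-195 layout size):
 *
 *   VBT version    expected child device config size
 *   < 106          22
 *   106..110       27
 *   111..194       LEGACY_CHILD_DEVICE_CONFIG_SIZE
 *   195            37
 *   196..215       38
 *   216..255       39
 *   256            40
 *   > 256          unknown (-ENOENT; the caller falls back to
 *                  sizeof(struct child_device_config))
 */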
+static bool child_device_size_valid(struct drm_i915_private *i915, int size)
+{
+ int expected_size;
+
+ expected_size = child_device_expected_size(i915->display.vbt.version);
+ if (expected_size < 0) {
+ expected_size = sizeof(struct child_device_config);
+ drm_dbg(&i915->drm,
+ "Expected child device config size for VBT version %u not known; assuming %d\n",
+ i915->display.vbt.version, expected_size);
+ }
+
+ /* Flag an error for unexpected size, but continue anyway. */
+ if (size != expected_size)
+ drm_err(&i915->drm,
+ "Unexpected child device config size %d (expected %d for VBT version %u)\n",
+ size, expected_size, i915->display.vbt.version);
+
+ /* The legacy sized child device config is the minimum we need. */
+ if (size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
+ drm_dbg_kms(&i915->drm,
+ "Child device config size %d is too small.\n",
+ size);
+ return false;
+ }
+
+ return true;
+}
+
static void
parse_general_definitions(struct drm_i915_private *i915)
{
@@ -2706,7 +2783,6 @@ parse_general_definitions(struct drm_i915_private *i915)
struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
int i, child_device_num;
- u8 expected_size;
u16 block_size;
int bus_pin;
@@ -2730,39 +2806,8 @@ parse_general_definitions(struct drm_i915_private *i915)
if (intel_gmbus_is_valid_pin(i915, bus_pin))
i915->display.vbt.crt_ddc_pin = bus_pin;
- if (i915->display.vbt.version < 106) {
- expected_size = 22;
- } else if (i915->display.vbt.version < 111) {
- expected_size = 27;
- } else if (i915->display.vbt.version < 195) {
- expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
- } else if (i915->display.vbt.version == 195) {
- expected_size = 37;
- } else if (i915->display.vbt.version <= 215) {
- expected_size = 38;
- } else if (i915->display.vbt.version <= 250) {
- expected_size = 39;
- } else {
- expected_size = sizeof(*child);
- BUILD_BUG_ON(sizeof(*child) < 39);
- drm_dbg(&i915->drm,
- "Expected child device config size for VBT version %u not known; assuming %u\n",
- i915->display.vbt.version, expected_size);
- }
-
- /* Flag an error for unexpected size, but continue anyway. */
- if (defs->child_dev_size != expected_size)
- drm_err(&i915->drm,
- "Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, i915->display.vbt.version);
-
- /* The legacy sized child device config is the minimum we need. */
- if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
- drm_dbg_kms(&i915->drm,
- "Child device config size %u is too small.\n",
- defs->child_dev_size);
+ if (!child_device_size_valid(i915, defs->child_dev_size))
return;
- }
/* get the number of child device */
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
@@ -2838,9 +2883,8 @@ init_vbt_panel_defaults(struct intel_panel *panel)
static void
init_vbt_missing_defaults(struct drm_i915_private *i915)
{
+ unsigned int ports = DISPLAY_RUNTIME_INFO(i915)->port_mask;
enum port port;
- int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
- BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
return;
@@ -2950,6 +2994,43 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
return vbt;
}
+static struct vbt_header *firmware_get_vbt(struct drm_i915_private *i915,
+ size_t *size)
+{
+ struct vbt_header *vbt = NULL;
+ const struct firmware *fw = NULL;
+ const char *name = i915->display.params.vbt_firmware;
+ int ret;
+
+ if (!name || !*name)
+ return NULL;
+
+ ret = request_firmware(&fw, name, i915->drm.dev);
+ if (ret) {
+ drm_err(&i915->drm,
+ "Requesting VBT firmware \"%s\" failed (%d)\n",
+ name, ret);
+ return NULL;
+ }
+
+ if (intel_bios_is_valid_vbt(i915, fw->data, fw->size)) {
+ vbt = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (vbt) {
+ drm_dbg_kms(&i915->drm,
+ "Found valid VBT firmware \"%s\"\n", name);
+ if (size)
+ *size = fw->size;
+ }
+ } else {
+ drm_dbg_kms(&i915->drm, "Invalid VBT firmware \"%s\"\n",
+ name);
+ }
+
+ release_firmware(fw);
+
+ return vbt;
+}
+
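/*
 * Usage sketch (assuming the standard firmware search path): a VBT
 * override can be loaded by placing the blob under /lib/firmware and
 * booting with the i915.vbt_firmware module parameter, e.g.
 *
 *   i915.vbt_firmware=i915/my_vbt.bin
 *
 * where the file name here is only an example. request_firmware()
 * resolves the name against the firmware path, and the blob is still
 * validated by intel_bios_is_valid_vbt() before it is used.
 */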
static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
{
intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset);
@@ -2957,7 +3038,8 @@ static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER);
}
-static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
+static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915,
+ size_t *size)
{
u32 count, data, found, store = 0;
u32 static_region, oprom_offset;
@@ -3000,6 +3082,9 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
+ if (size)
+ *size = vbt_size;
+
return (struct vbt_header *)vbt;
err_free_vbt:
@@ -3008,7 +3093,8 @@ err_not_found:
return NULL;
}
-static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
+static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
+ size_t *sizep)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
void __iomem *p = NULL, *oprom;
@@ -3057,6 +3143,9 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
pci_unmap_rom(pdev, oprom);
+ if (sizep)
+ *sizep = vbt_size;
+
drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
return vbt;
@@ -3069,6 +3158,32 @@ err_unmap_oprom:
return NULL;
}
+static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915,
+ size_t *sizep)
+{
+ const struct vbt_header *vbt = NULL;
+ intel_wakeref_t wakeref;
+
+ vbt = firmware_get_vbt(i915, sizep);
+
+ if (!vbt)
+ vbt = intel_opregion_get_vbt(i915, sizep);
+
+ /*
+ * If the OpRegion does not have a VBT, look in the SPI flash
+ * through the MMIO or PCI mapping.
+ */
+ if (!vbt && IS_DGFX(i915))
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vbt = spi_oprom_get_vbt(i915, sizep);
+
+ if (!vbt)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vbt = oprom_get_vbt(i915, sizep);
+
+ return vbt;
+}
+
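/*
 * Lookup order implemented above: 1) the vbt_firmware override, 2) the
 * ACPI OpRegion, 3) SPI flash (discrete GPUs only), 4) the PCI option
 * ROM. The first source that yields a valid VBT wins.
 */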
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
* @i915: i915 device instance
@@ -3080,7 +3195,6 @@ err_unmap_oprom:
void intel_bios_init(struct drm_i915_private *i915)
{
const struct vbt_header *vbt;
- struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
INIT_LIST_HEAD(&i915->display.vbt.display_devices);
@@ -3094,21 +3208,7 @@ void intel_bios_init(struct drm_i915_private *i915)
init_vbt_defaults(i915);
- vbt = intel_opregion_get_vbt(i915, NULL);
-
- /*
- * If the OpRegion does not have VBT, look in SPI flash through MMIO or
- * PCI mapping
- */
- if (!vbt && IS_DGFX(i915)) {
- oprom_vbt = spi_oprom_get_vbt(i915);
- vbt = oprom_vbt;
- }
-
- if (!vbt) {
- oprom_vbt = oprom_get_vbt(i915);
- vbt = oprom_vbt;
- }
+ vbt = intel_bios_get_vbt(i915, NULL);
if (!vbt)
goto out;
@@ -3141,7 +3241,7 @@ out:
parse_sdvo_device_mapping(i915);
parse_ddi_ports(i915);
- kfree(oprom_vbt);
+ kfree(vbt);
}
static void intel_bios_init_panel(struct drm_i915_private *i915,
@@ -3313,8 +3413,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- if (intel_opregion_get_vbt(i915, NULL))
- return true;
+ return intel_opregion_vbt_present(i915);
}
return false;
@@ -3351,6 +3450,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
{
- const struct child_device_config *child = &devdata->child;
+ const struct child_device_config *child;
+
+ if (!devdata)
+ return false;
+
+ child = &devdata->child;
+
if (!intel_bios_encoder_supports_dp(devdata) ||
!intel_bios_encoder_supports_hdmi(devdata))
return false;
@@ -3672,13 +3774,12 @@ static int intel_bios_vbt_show(struct seq_file *m, void *unused)
const void *vbt;
size_t vbt_size;
- /*
- * FIXME: VBT might originate from other places than opregion, and then
- * this would be incorrect.
- */
- vbt = intel_opregion_get_vbt(i915, &vbt_size);
- if (vbt)
+ vbt = intel_bios_get_vbt(i915, &vbt_size);
+
+ if (vbt) {
seq_write(m, vbt, vbt_size);
+ kfree(vbt);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 7f2a50b4f494..972ea887e232 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -162,7 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
1);
if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask);
+ drm_err(&dev_priv->drm,
+ "Failed to disable qgv points (0x%x) points: 0x%x\n",
+ ret, points_mask);
return ret;
}
@@ -290,8 +292,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp = &qi->points[i];
ret = intel_read_qgv_point_info(dev_priv, sp, i);
- if (ret)
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
return ret;
+ }
drm_dbg_kms(&dev_priv->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
@@ -659,6 +663,22 @@ static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
return bi->psf_bw[psf_gv_point];
}
+static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+ int num_active_planes, int qgv_point)
+{
+ unsigned int idx;
+
+ if (DISPLAY_VER(i915) >= 12)
+ idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ else
+ idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+
+ if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ return 0;
+
+ return i915->display.bw.max[idx].deratedbw[qgv_point];
+}
+
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
if (!HAS_DISPLAY(dev_priv))
@@ -735,6 +755,7 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
+ bw_state->force_check_qgv = true;
drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
@@ -804,6 +825,80 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+ int num_active_planes)
+{
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int max_bw_point = 0;
+ unsigned int max_bw = 0;
+ int i;
+
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int max_data_rate =
+ icl_qgv_bw(i915, num_active_planes, i);
+
+ /*
+ * We need to know which qgv point gives us
+ * maximum bandwidth in order to disable SAGV
+ * if we find that we exceed SAGV block time
+ * with watermarks. By that point we already
+ * have the watermarks, as they are calculated
+ * earlier in intel_atomic_check().
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = BIT(i);
+ max_bw = max_data_rate;
+ }
+ }
+
+ return max_bw_point;
+}
+
+static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+ unsigned int qgv_points,
+ unsigned int psf_points)
+{
+ return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+}
+
+static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+{
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int max_bw_point_mask = 0;
+ unsigned int max_bw = 0;
+ int i;
+
+ for (i = 0; i < num_psf_gv_points; i++) {
+ unsigned int max_data_rate = adl_psf_bw(i915, i);
+
+ if (max_data_rate > max_bw) {
+ max_bw_point_mask = BIT(i);
+ max_bw = max_data_rate;
+ } else if (max_data_rate == max_bw) {
+ max_bw_point_mask |= BIT(i);
+ }
+ }
+
+ return max_bw_point_mask;
+}
+
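/*
 * Note the intentional asymmetry between the two mask helpers above,
 * illustrated with a made-up bandwidth table: for per-point bandwidths
 * {10, 20, 20, 15}, icl_max_bw_qgv_point_mask() returns BIT(1) = 0x2
 * (only a strictly greater rate replaces the current best), while
 * icl_max_bw_psf_gv_point_mask() returns 0x6 (tied maxima are all
 * accumulated into the mask).
 */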
+static void icl_force_disable_sagv(struct drm_i915_private *i915,
+ struct intel_bw_state *bw_state)
+{
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ qgv_points,
+ psf_points);
+
+ drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ bw_state->qgv_points_mask);
+
+ icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+}
+
static int mtl_find_qgv_points(struct drm_i915_private *i915,
unsigned int data_rate,
unsigned int num_active_planes,
@@ -881,8 +976,6 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int max_bw_point = 0;
- unsigned int max_bw = 0;
unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
u16 psf_points = 0;
@@ -895,31 +988,8 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int idx;
- unsigned int max_data_rate;
-
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, i);
- else
- idx = icl_max_bw_index(i915, num_active_planes, i);
-
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
- continue;
-
- max_data_rate = i915->display.bw.max[idx].deratedbw[i];
-
- /*
- * We need to know which qgv point gives us
- * maximum bandwidth in order to disable SAGV
- * if we find that we exceed SAGV block time
- * with watermarks. By that moment we already
- * have those, as it is calculated earlier in
- * intel_atomic_check,
- */
- if (max_data_rate > max_bw) {
- max_bw_point = i;
- max_bw = max_data_rate;
- }
+ unsigned int max_data_rate = icl_qgv_bw(i915,
+ num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
@@ -963,20 +1033,18 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* cause.
*/
if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = BIT(max_bw_point);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n",
- max_bw_point);
+ qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
+ drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ qgv_points);
}
/*
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask =
- ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) &
- icl_qgv_points_mask(i915);
-
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ qgv_points,
+ psf_points);
/*
* If the actual mask had changed we need to make sure that
* the commits are serialized(in case this is a nomodeset, nonblocking)
@@ -1272,8 +1340,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state))
+ (intel_can_enable_sagv(i915, old_bw_state) !=
+ intel_can_enable_sagv(i915, new_bw_state) ||
+ new_bw_state->force_check_qgv))
changed = true;
/*
@@ -1287,6 +1356,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (ret)
return ret;
+ new_bw_state->force_check_qgv = false;
+
return 0;
}
@@ -1313,7 +1384,7 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *dev_priv)
+int intel_bw_init(struct drm_i915_private *i915)
{
struct intel_bw_state *state;
@@ -1321,8 +1392,15 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj,
+ intel_atomic_global_obj_init(i915, &i915->display.bw.obj,
&state->base, &intel_bw_funcs);
+ /*
+ * Only force this when we have SAGV. From display version 14 onwards,
+ * SAGV is handled through pmdemand requests instead.
+ */
+ if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
+ icl_force_disable_sagv(i915, state);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 59cb4fc5db76..161813cca473 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -47,12 +47,19 @@ struct intel_bw_state {
*/
u16 qgv_points_mask;
+ /*
+ * Flag to force the QGV comparison in atomic check right after the
+ * hw state readout
+ */
+ bool force_check_qgv;
+
int min_cdclk[I915_MAX_PIPES];
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
};
-#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
+#define to_intel_bw_state(global_state) \
+ container_of_const((global_state), struct intel_bw_state, base)
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ed89b86ea625..7a833b5f2de2 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -39,6 +39,8 @@
#include "intel_pcode.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
+#include "skl_watermark.h"
+#include "skl_watermark_regs.h"
#include "vlv_sideband.h"
/**
@@ -63,6 +65,32 @@
* DMC will not change the active CDCLK frequency however, so that part
* will still be performed by the driver directly.
*
+ * There are multiple components involved in the generation of the CDCLK
+ * frequency:
+ *
+ * - We have the CDCLK PLL, which generates an output clock based on a
+ * reference clock and a ratio parameter.
+ * - The CD2X Divider, which divides the output of the PLL based on a
+ * divisor selected from a set of pre-defined choices.
+ * - The CD2X Squasher, which further divides the output based on a
+ * waveform represented as a sequence of bits where each zero
+ * "squashes out" a clock cycle.
+ * - And, finally, a fixed divider that divides the output frequency by 2.
+ *
+ * As such, the resulting CDCLK frequency can be calculated with the
+ * following formula:
+ *
+ * cdclk = vco / cd2x_div / (sq_len / sq_div) / 2
+ *
+ * where vco is the frequency generated by the PLL; cd2x_div
+ * represents the CD2X Divider; sq_len and sq_div are the bit length
+ * and the number of high bits for the CD2X Squasher waveform, respectively;
+ * and 2 represents the fixed divider.
+ *
+ * Note that some older platforms do not contain the CD2X Divider
+ * and/or CD2X Squasher, in which case we can ignore their respective
+ * factors in the formula above.
+ *
* Several methods exist to change the CDCLK frequency, which ones are
* supported depends on the platform:
*
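/*
 * Worked example of the formula above (an illustrative sketch, not
 * driver code): for the first xe2lpd_cdclk_table entry below,
 * refclk = 38400 kHz and ratio = 16 give vco = 614400 kHz; the squash
 * waveform 0xaaaa is 16 bits long with 8 bits set; with cd2x_div = 1:
 *
 *   cdclk = 614400 / 1 / (16 / 8) / 2 = 153600 kHz
 *
 * matching .cdclk = 153600 in that entry.
 */
static int example_cdclk_khz(int vco, int cd2x_div, u16 waveform)
{
	/* Assume a 16-bit squash window whenever a waveform is in use. */
	int sq_len = 16;
	int sq_div = hweight16(waveform);

	if (!waveform) /* no squashing on this platform */
		return vco / cd2x_div / 2;

	return vco / cd2x_div * sq_div / sq_len / 2;
}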
@@ -993,15 +1021,14 @@ static int skl_cdclk_decimal(int cdclk)
return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
-static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
- int vco)
+static void skl_set_preferred_cdclk_vco(struct drm_i915_private *i915, int vco)
{
- bool changed = dev_priv->skl_preferred_vco_freq != vco;
+ bool changed = i915->display.cdclk.skl_preferred_vco_freq != vco;
- dev_priv->skl_preferred_vco_freq = vco;
+ i915->display.cdclk.skl_preferred_vco_freq = vco;
if (changed)
- intel_update_max_cdclk(dev_priv);
+ intel_update_max_cdclk(i915);
}
static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
@@ -1205,7 +1232,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
* Use the current vco as our initial
* guess as to what the preferred vco is.
*/
- if (dev_priv->skl_preferred_vco_freq == 0)
+ if (dev_priv->display.cdclk.skl_preferred_vco_freq == 0)
skl_set_preferred_cdclk_vco(dev_priv,
dev_priv->display.cdclk.hw.vco);
return;
@@ -1213,7 +1240,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
cdclk_config = dev_priv->display.cdclk.hw;
- cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
+ cdclk_config.vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
if (cdclk_config.vco == 0)
cdclk_config.vco = 8100000;
cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
@@ -1391,7 +1418,7 @@ static const struct intel_cdclk_vals mtl_cdclk_table[] = {
{}
};
-static const struct intel_cdclk_vals lnl_cdclk_table[] = {
+static const struct intel_cdclk_vals xe2lpd_cdclk_table[] = {
{ .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa },
{ .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
{ .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
@@ -1656,6 +1683,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
}
out:
+ if (DISPLAY_VER(dev_priv) >= 20)
+ cdclk_config->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
@@ -1850,6 +1879,37 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
return vco == ~0;
}
+static bool mdclk_source_is_cdclk_pll(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 20;
+}
+
+static u32 xe2lpd_mdclk_source_sel(struct drm_i915_private *i915)
+{
+ if (mdclk_source_is_cdclk_pll(i915))
+ return MDCLK_SOURCE_SEL_CDCLK_PLL;
+
+ return MDCLK_SOURCE_SEL_CD2XCLK;
+}
+
+int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config)
+{
+ if (mdclk_source_is_cdclk_pll(i915))
+ return DIV_ROUND_UP(cdclk_config->vco, cdclk_config->cdclk);
+
+ /* Otherwise, source for MDCLK is CD2XCLK. */
+ return 2;
+}
+
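/*
 * Example (values from the xe2lpd table earlier in this patch): with
 * MDCLK sourced from the CDCLK PLL (display version >= 20), a config of
 * vco = 614400 kHz and cdclk = 153600 kHz yields
 * DIV_ROUND_UP(614400, 153600) = 4. On earlier platforms MDCLK comes
 * from CD2XCLK, so the ratio is always 2.
 */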
+static void xe2lpd_mdclk_cdclk_ratio_program(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config)
+{
+ intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_mdclk_cdclk_ratio(i915, cdclk_config),
+ cdclk_config->joined_mbus);
+}
+
static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
const struct intel_cdclk_config *old_cdclk_config,
const struct intel_cdclk_config *new_cdclk_config,
@@ -1954,7 +2014,7 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (DISPLAY_VER(i915) >= 20)
- val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
+ val |= xe2lpd_mdclk_source_sel(i915);
else
val |= skl_cdclk_decimal(cdclk);
@@ -1967,7 +2027,6 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- u16 waveform;
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
@@ -1982,10 +2041,11 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
} else
bxt_cdclk_pll_update(dev_priv, vco);
- waveform = cdclk_squash_waveform(dev_priv, cdclk);
+ if (HAS_CDCLK_SQUASH(dev_priv)) {
+ u16 waveform = cdclk_squash_waveform(dev_priv, cdclk);
- if (HAS_CDCLK_SQUASH(dev_priv))
dg2_cdclk_squash_program(dev_priv, waveform);
+ }
intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe));
@@ -2030,6 +2090,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
+ if (DISPLAY_VER(dev_priv) >= 20 && cdclk < dev_priv->display.cdclk.hw.cdclk)
+ xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
+
if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw,
cdclk_config, &mid_cdclk_config)) {
_bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe);
@@ -2038,6 +2101,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
_bxt_set_cdclk(dev_priv, cdclk_config, pipe);
}
+ if (DISPLAY_VER(dev_priv) >= 20 && cdclk > dev_priv->display.cdclk.hw.cdclk)
+ xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
+
if (DISPLAY_VER(dev_priv) >= 14)
/*
* NOOP - No Pcode communication needed for
@@ -2260,16 +2326,15 @@ static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
}
/**
- * intel_cdclk_needs_modeset - Determine if changong between the CDCLK
- * configurations requires a modeset on all pipes
+ * intel_cdclk_clock_changed - Check whether the clock changed
* @a: first CDCLK configuration
* @b: second CDCLK configuration
*
* Returns:
- * True if changing between the two CDCLK configurations
- * requires all pipes to be off, false if not.
+ * True if the CDCLK changed in a way that requires re-programming,
+ * false otherwise.
*/
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
return a->cdclk != b->cdclk ||
@@ -2322,7 +2387,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
- return intel_cdclk_needs_modeset(a, b) ||
+ return intel_cdclk_clock_changed(a, b) ||
a->voltage_level != b->voltage_level;
}
@@ -2368,18 +2433,9 @@ static void intel_pcode_notify(struct drm_i915_private *i915,
ret);
}
-/**
- * intel_set_cdclk - Push the CDCLK configuration to the hardware
- * @dev_priv: i915 device
- * @cdclk_config: new CDCLK configuration
- * @pipe: pipe with which to synchronize the update
- *
- * Program the hardware based on the passed in CDCLK state,
- * if necessary.
- */
static void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
- enum pipe pipe)
+ enum pipe pipe, const char *context)
{
struct intel_encoder *encoder;
@@ -2389,7 +2445,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk))
return;
- intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to");
+ intel_cdclk_dump_config(dev_priv, cdclk_config, context);
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2519,6 +2575,17 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
update_cdclk, update_pipe_count);
}
+bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+
+ return new_cdclk_state && !new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk;
+}
+
/**
* intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
* @state: intel atomic state
@@ -2534,7 +2601,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ struct intel_cdclk_config cdclk_config;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2543,12 +2611,32 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
- if (pipe == INVALID_PIPE ||
- old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (new_cdclk_state->disable_pipes) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ } else {
+ if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = new_cdclk_state->pipe;
+ } else {
+ cdclk_config = old_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ }
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+ cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+ old_cdclk_state->actual.voltage_level);
}
+
+ /*
+ * mbus joining will be changed later by
+ * intel_dbuf_mbus_{pre,post}_ddb_update()
+ */
+ cdclk_config.joined_mbus = old_cdclk_state->actual.joined_mbus;
+
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &cdclk_config, pipe,
+ "Pre changing CDCLK to");
}
/**
@@ -2566,7 +2654,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2575,12 +2663,16 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_post_notify(state);
- if (pipe != INVALID_PIPE &&
- old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (!new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+ pipe = new_cdclk_state->pipe;
+ else
+ pipe = INVALID_PIPE;
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
- }
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe,
+ "Post changing CDCLK to");
}
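/*
 * Sequencing example (a sketch of the logic in the two functions above,
 * not additional code): for a frequency decrease, say 480000 -> 307200
 * kHz with disable_pipes == false, the pre-plane-update step keeps the
 * old frequency but raises voltage_level to max(old, new), and the
 * post-plane-update step then programs the new, lower frequency
 * synchronized against the pipe. For an increase the roles flip: the
 * new frequency is programmed before the planes are updated.
 */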
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -2731,25 +2823,6 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (crtc_state->dsc.compression_enable)
min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
- /*
- * HACK. Currently for TGL/DG2 platforms we calculate
- * min_cdclk initially based on pixel_rate divided
- * by 2, accounting for also plane requirements,
- * however in some cases the lowest possible CDCLK
- * doesn't work and causing the underruns.
- * Explicitly stating here that this seems to be currently
- * rather a Hack, than final solution.
- */
- if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) {
- /*
- * Clamp to max_cdclk_freq in case pixel rate is higher,
- * in order not to break an 8K, but still leave W/A at place.
- */
- min_cdclk = max_t(int, min_cdclk,
- min_t(int, crtc_state->pixel_rate,
- dev_priv->display.cdclk.max_cdclk_freq));
- }
-
return min_cdclk;
}
@@ -2937,7 +3010,7 @@ static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
vco = cdclk_state->logical.vco;
if (!vco)
- vco = dev_priv->skl_preferred_vco_freq;
+ vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->hw.enable)
@@ -3058,6 +3131,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
+ cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@@ -3121,6 +3195,20 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
return 0;
}
+int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ cdclk_state->actual.joined_mbus = joined_mbus;
+ cdclk_state->logical.joined_mbus = joined_mbus;
+
+ return intel_atomic_lock_global_state(&cdclk_state->base);
+}
+
int intel_cdclk_init(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state *cdclk_state;
@@ -3229,17 +3317,28 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
drm_dbg_kms(&dev_priv->drm,
"Can change cdclk cd2x divider with pipe %c active\n",
pipe_name(pipe));
- } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
+ } else if (intel_cdclk_clock_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
/* All pipes must be switched off while we change the cdclk. */
ret = intel_modeset_all_pipes_late(state, "CDCLK change");
if (ret)
return ret;
+ new_cdclk_state->disable_pipes = true;
+
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
+ if (intel_mdclk_cdclk_ratio(dev_priv, &old_cdclk_state->actual) !=
+ intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual)) {
+ int ratio = intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual);
+
+ ret = intel_dbuf_state_set_mdclk_cdclk_ratio(state, ratio);
+ if (ret)
+ return ret;
+ }
+
drm_dbg_kms(&dev_priv->drm,
"New cdclk calculated to be logical %u kHz, actual %u kHz\n",
new_cdclk_state->logical.cdclk,
@@ -3297,7 +3396,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
- vco = dev_priv->skl_preferred_vco_freq;
+ vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
@@ -3339,13 +3438,13 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk;
}
- dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+ dev_priv->display.cdclk.max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
dev_priv->display.cdclk.max_cdclk_freq);
drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
- dev_priv->max_dotclk_freq);
+ dev_priv->display.cdclk.max_dotclk_freq);
}
/**
@@ -3519,7 +3618,7 @@ static int i915_cdclk_info_show(struct seq_file *m, void *unused)
seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
- seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+ seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->display.cdclk.max_dotclk_freq);
return 0;
}
@@ -3534,13 +3633,6 @@ void intel_cdclk_debugfs_register(struct drm_i915_private *i915)
i915, &i915_cdclk_info_fops);
}
-static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
- .get_cdclk = bxt_get_cdclk,
- .set_cdclk = bxt_set_cdclk,
- .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
- .calc_voltage_level = rplu_calc_voltage_level,
-};
-
static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
@@ -3684,10 +3776,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 20) {
- dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
- dev_priv->display.cdclk.table = lnl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
+ dev_priv->display.cdclk.table = xe2lpd_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 14) {
- dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
dev_priv->display.cdclk.table = mtl_cdclk_table;
} else if (IS_DG2(dev_priv)) {
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 48fd7d39e0cd..cfdcdec07a4d 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -18,6 +18,8 @@ struct intel_crtc_state;
struct intel_cdclk_config {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
+ /* This field is only valid for Xe2LPD and above. */
+ bool joined_mbus;
};
struct intel_cdclk_state {
@@ -51,6 +53,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
@@ -60,8 +65,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b);
+int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config);
+bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state);
void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
void intel_cdclk_dump_config(struct drm_i915_private *i915,
@@ -72,10 +80,13 @@ void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config);
int intel_cdclk_atomic_check(struct intel_atomic_state *state,
bool *need_cdclk_calc);
+int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
-#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
+#define to_intel_cdclk_state(global_state) \
+ container_of_const((global_state), struct intel_cdclk_state, base)
+
#define intel_atomic_get_old_cdclk_state(state) \
to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
#define intel_atomic_get_new_cdclk_state(state) \
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index ca7112b32cb3..d23163dc64d4 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -616,19 +616,19 @@ static void vlv_load_wgc_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe),
csc->coeff[1] << 16 | csc->coeff[0]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C02(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe),
csc->coeff[2]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe),
csc->coeff[4] << 16 | csc->coeff[3]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C12(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe),
csc->coeff[5]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe),
csc->coeff[7] << 16 | csc->coeff[6]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C22(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe),
csc->coeff[8]);
}
@@ -639,25 +639,25 @@ static void vlv_read_wgc_csc(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe));
csc->coeff[0] = tmp & 0xffff;
csc->coeff[1] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe));
csc->coeff[2] = tmp & 0xffff;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe));
csc->coeff[3] = tmp & 0xffff;
csc->coeff[4] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe));
csc->coeff[5] = tmp & 0xffff;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe));
csc->coeff[6] = tmp & 0xffff;
csc->coeff[7] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe));
csc->coeff[8] = tmp & 0xffff;
}
@@ -1227,7 +1227,7 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
- intel_de_write_fw(dev_priv, PALETTE(pipe, i),
+ intel_de_write_fw(dev_priv, PALETTE(dev_priv, pipe, i),
i9xx_lut_8(&lut[i]));
}
@@ -1240,9 +1240,11 @@ static void i9xx_load_lut_10(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0),
i9xx_lut_10_ldw(&lut[i]));
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1),
i9xx_lut_10_udw(&lut[i]));
}
}
@@ -1274,9 +1276,11 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0),
i965_lut_10p6_ldw(&lut[i]));
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1),
i965_lut_10p6_udw(&lut[i]));
}
@@ -3150,7 +3154,8 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i));
+ u32 val = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, i));
i9xx_lut_8_pack(&lut[i], val);
}
@@ -3176,8 +3181,10 @@ static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
- udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
+ ldw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0));
+ udw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1));
i9xx_lut_10_pack(&lut[i], ldw, udw);
}
@@ -3224,8 +3231,10 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
- u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
+ u32 ldw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0));
+ u32 udw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1));
i965_lut_10p6_pack(&lut[i], ldw, udw);
}
diff --git a/drivers/gpu/drm/i915/display/intel_color_regs.h b/drivers/gpu/drm/i915/display/intel_color_regs.h
index 9f4ae58f3e7e..bb99ea533842 100644
--- a/drivers/gpu/drm/i915/display/intel_color_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_color_regs.h
@@ -8,7 +8,35 @@
#include "intel_display_reg_defs.h"
-/* legacy palette */
+/* GMCH palette */
+#define _PALETTE_A 0xa000
+#define _PALETTE_B 0xa800
+#define _CHV_PALETTE_C 0xc000
+/* 8bit mode / i965+ 10.6 interpolated mode ldw/udw */
+#define PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
+/* pre-i965 10bit interpolated mode ldw */
+#define PALETTE_10BIT_RED_LDW_MASK REG_GENMASK(23, 16)
+#define PALETTE_10BIT_GREEN_LDW_MASK REG_GENMASK(15, 8)
+#define PALETTE_10BIT_BLUE_LDW_MASK REG_GENMASK(7, 0)
+/* pre-i965 10bit interpolated mode udw */
+#define PALETTE_10BIT_RED_EXP_MASK REG_GENMASK(23, 22)
+#define PALETTE_10BIT_RED_MANT_MASK REG_GENMASK(21, 18)
+#define PALETTE_10BIT_RED_UDW_MASK REG_GENMASK(17, 16)
+#define PALETTE_10BIT_GREEN_EXP_MASK REG_GENMASK(15, 14)
+#define PALETTE_10BIT_GREEN_MANT_MASK REG_GENMASK(13, 10)
+#define PALETTE_10BIT_GREEN_UDW_MASK REG_GENMASK(9, 8)
+#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
+#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
+#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
+#define PALETTE(dev_priv, pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
+ _PICK_EVEN_2RANGES(pipe, 2, \
+ _PALETTE_A, _PALETTE_B, \
+ _CHV_PALETTE_C, _CHV_PALETTE_C) + \
+ (i) * 4)
+
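/*
 * Expansion example, derived from the PALETTE() macro above: pipes A
 * and B fall in the first, evenly spaced range and pipe C (CHV only)
 * in the second, so PALETTE(i915, PIPE_A, 0) resolves to base + 0xa000,
 * PALETTE(i915, PIPE_B, 0) to base + 0xa800, and PALETTE(i915, PIPE_C, 0)
 * to base + 0xc000, with each entry i adding i * 4 bytes.
 */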
+/* ilk+ palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
/* see PALETTE_* for the bits */
@@ -228,12 +256,12 @@
#define _PIPE_A_WGC_C21_C20 0x600C0 /* s2.10 */
#define _PIPE_A_WGC_C22 0x600C4 /* s2.10 */
-#define PIPE_WGC_C01_C00(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C01_C00)
-#define PIPE_WGC_C02(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C02)
-#define PIPE_WGC_C11_C10(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C11_C10)
-#define PIPE_WGC_C12(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C12)
-#define PIPE_WGC_C21_C20(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C21_C20)
-#define PIPE_WGC_C22(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C22)
+#define PIPE_WGC_C01_C00(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C01_C00)
+#define PIPE_WGC_C02(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C02)
+#define PIPE_WGC_C11_C10(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C11_C10)
+#define PIPE_WGC_C12(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C12)
+#define PIPE_WGC_C21_C20(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C21_C20)
+#define PIPE_WGC_C22(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C22)
/* pipe CSC & degamma/gamma LUTs on CHV */
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index b0983edccf3f..0964e392d02c 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -25,28 +25,26 @@
4 * (dw))
#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy))
-#define CL_POWER_DOWN_ENABLE (1 << 4)
-#define SUS_CLOCK_CONFIG (3 << 0)
+#define CL_POWER_DOWN_ENABLE REG_BIT(4)
+#define SUS_CLOCK_CONFIG REG_GENMASK(1, 0)
#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy))
-#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
-#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
-#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
-#define PWR_UP_ALL_LANES (0x0 << 4)
-#define PWR_DOWN_LN_3_2_1 (0xe << 4)
-#define PWR_DOWN_LN_3_2 (0xc << 4)
-#define PWR_DOWN_LN_3 (0x8 << 4)
-#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
-#define PWR_DOWN_LN_1_0 (0x3 << 4)
-#define PWR_DOWN_LN_3_1 (0xa << 4)
-#define PWR_DOWN_LN_3_1_0 (0xb << 4)
-#define PWR_DOWN_LN_MASK (0xf << 4)
-#define PWR_DOWN_LN_SHIFT 4
-#define EDP4K2K_MODE_OVRD_EN (1 << 3)
-#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2)
+#define PG_SEQ_DELAY_OVERRIDE_MASK REG_GENMASK(26, 25)
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE REG_BIT(24)
+#define PWR_DOWN_LN_MASK REG_GENMASK(7, 4)
+#define PWR_UP_ALL_LANES REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x0)
+#define PWR_DOWN_LN_3_2_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xe)
+#define PWR_DOWN_LN_3_2 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xc)
+#define PWR_DOWN_LN_3 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x8)
+#define PWR_DOWN_LN_2_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x7)
+#define PWR_DOWN_LN_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x3)
+#define PWR_DOWN_LN_3_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xa)
+#define PWR_DOWN_LN_3_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xb)
+#define EDP4K2K_MODE_OVRD_EN REG_BIT(3)
+#define EDP4K2K_MODE_OVRD_OPTIMIZED REG_BIT(2)
#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
-#define ICL_LANE_ENABLE_AUX (1 << 0)
+#define ICL_LANE_ENABLE_AUX REG_BIT(0)
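/*
 * Equivalence check for the conversions above (illustrative): the
 * REG_* macros encode exactly the same values as the old open-coded
 * shifts, e.g.
 *
 *   PWR_DOWN_LN_3_2 = REG_FIELD_PREP(REG_GENMASK(7, 4), 0xc)
 *                   = 0xc << 4 = 0xc0
 *
 * with the added benefit that REG_FIELD_PREP() verifies at compile
 * time that the value fits within the mask.
 */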
/* ICL Port COMP_DW registers */
#define _ICL_PORT_COMP 0x100
@@ -54,24 +52,22 @@
_ICL_PORT_COMP + 4 * (dw))
#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy))
-#define COMP_INIT (1 << 31)
+#define COMP_INIT REG_BIT(31)
#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy))
#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy))
-#define PROCESS_INFO_DOT_0 (0 << 26)
-#define PROCESS_INFO_DOT_1 (1 << 26)
-#define PROCESS_INFO_DOT_4 (2 << 26)
-#define PROCESS_INFO_MASK (7 << 26)
-#define PROCESS_INFO_SHIFT 26
-#define VOLTAGE_INFO_0_85V (0 << 24)
-#define VOLTAGE_INFO_0_95V (1 << 24)
-#define VOLTAGE_INFO_1_05V (2 << 24)
-#define VOLTAGE_INFO_MASK (3 << 24)
-#define VOLTAGE_INFO_SHIFT 24
+#define PROCESS_INFO_MASK REG_GENMASK(28, 26)
+#define PROCESS_INFO_DOT_0 REG_FIELD_PREP(PROCESS_INFO_MASK, 0)
+#define PROCESS_INFO_DOT_1 REG_FIELD_PREP(PROCESS_INFO_MASK, 1)
+#define PROCESS_INFO_DOT_4 REG_FIELD_PREP(PROCESS_INFO_MASK, 2)
+#define VOLTAGE_INFO_MASK REG_GENMASK(25, 24)
+#define VOLTAGE_INFO_0_85V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 0)
+#define VOLTAGE_INFO_0_95V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 1)
+#define VOLTAGE_INFO_1_05V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 2)
#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy))
-#define IREFGEN (1 << 24)
+#define IREFGEN REG_BIT(24)
#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy))
@@ -92,9 +88,9 @@
#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20)
#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0)
-#define COMMON_KEEPER_EN (1 << 26)
-#define LATENCY_OPTIM_MASK (0x3 << 2)
-#define LATENCY_OPTIM_VAL(x) ((x) << 2)
+#define COMMON_KEEPER_EN REG_BIT(26)
+#define LATENCY_OPTIM_MASK REG_GENMASK(3, 2)
+#define LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(LATENCY_OPTIM_MASK, (x))
/* ICL Port TX registers */
#define _ICL_PORT_TX_AUX 0x380
@@ -111,42 +107,49 @@
#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy))
-#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
-#define SWING_SEL_UPPER_MASK (1 << 15)
-#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
-#define SWING_SEL_LOWER_MASK (0x7 << 11)
-#define FRC_LATENCY_OPTIM_MASK (0x7 << 8)
-#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8)
-#define RCOMP_SCALAR(x) ((x) << 0)
-#define RCOMP_SCALAR_MASK (0xFF << 0)
+#define SWING_SEL_UPPER_MASK REG_BIT(15)
+#define SWING_SEL_UPPER(x) REG_FIELD_PREP(SWING_SEL_UPPER_MASK, (x) >> 3)
+#define SWING_SEL_LOWER_MASK REG_GENMASK(13, 11)
+#define SWING_SEL_LOWER(x) REG_FIELD_PREP(SWING_SEL_LOWER_MASK, (x) & 0x7)
+#define FRC_LATENCY_OPTIM_MASK REG_GENMASK(10, 8)
+#define FRC_LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(FRC_LATENCY_OPTIM_MASK, (x))
+#define RCOMP_SCALAR_MASK REG_GENMASK(7, 0)
+#define RCOMP_SCALAR(x) REG_FIELD_PREP(RCOMP_SCALAR_MASK, (x))
#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
-#define LOADGEN_SELECT (1 << 31)
-#define POST_CURSOR_1(x) ((x) << 12)
-#define POST_CURSOR_1_MASK (0x3F << 12)
-#define POST_CURSOR_2(x) ((x) << 6)
-#define POST_CURSOR_2_MASK (0x3F << 6)
-#define CURSOR_COEFF(x) ((x) << 0)
-#define CURSOR_COEFF_MASK (0x3F << 0)
+#define LOADGEN_SELECT REG_BIT(31)
+#define POST_CURSOR_1_MASK REG_GENMASK(17, 12)
+#define POST_CURSOR_1(x) REG_FIELD_PREP(POST_CURSOR_1_MASK, (x))
+#define POST_CURSOR_2_MASK REG_GENMASK(11, 6)
+#define POST_CURSOR_2(x) REG_FIELD_PREP(POST_CURSOR_2_MASK, (x))
+#define CURSOR_COEFF_MASK REG_GENMASK(5, 0)
+#define CURSOR_COEFF(x) REG_FIELD_PREP(CURSOR_COEFF_MASK, (x))
#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy))
-#define TX_TRAINING_EN (1 << 31)
-#define TAP2_DISABLE (1 << 30)
-#define TAP3_DISABLE (1 << 29)
-#define SCALING_MODE_SEL(x) ((x) << 18)
-#define SCALING_MODE_SEL_MASK (0x7 << 18)
-#define RTERM_SELECT(x) ((x) << 3)
-#define RTERM_SELECT_MASK (0x7 << 3)
+#define TX_TRAINING_EN REG_BIT(31)
+#define TAP2_DISABLE REG_BIT(30)
+#define TAP3_DISABLE REG_BIT(29)
+#define SCALING_MODE_SEL_MASK REG_GENMASK(20, 18)
+#define SCALING_MODE_SEL(x) REG_FIELD_PREP(SCALING_MODE_SEL_MASK, (x))
+#define RTERM_SELECT_MASK REG_GENMASK(5, 3)
+#define RTERM_SELECT(x) REG_FIELD_PREP(RTERM_SELECT_MASK, (x))
+
+#define ICL_PORT_TX_DW6_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(6, phy))
+#define ICL_PORT_TX_DW6_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(6, phy))
+#define ICL_PORT_TX_DW6_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(6, ln, phy))
+#define O_FUNC_OVRD_EN REG_BIT(7)
+#define O_LDO_REF_SEL_CRI REG_GENMASK(6, 1)
+#define O_LDO_BYPASS_CRI REG_BIT(0)
#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
-#define N_SCALAR(x) ((x) << 24)
-#define N_SCALAR_MASK (0x7F << 24)
+#define N_SCALAR_MASK REG_GENMASK(30, 24)
+#define N_SCALAR(x) REG_FIELD_PREP(N_SCALAR_MASK, (x))
#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 93479db0f89f..10e95dc425a6 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -348,7 +348,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int max_dotclk = dev_priv->max_dotclk_freq;
+ int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -356,9 +356,6 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 4bcf446c75f4..ccaa4cb2809b 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -12,33 +12,31 @@
#include "intel_hdmi.h"
#include "intel_vrr.h"
-static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+static void intel_dump_crtc_timings(struct drm_printer *p,
const struct drm_display_mode *mode)
{
- drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, "
- "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
- "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
- "flags=0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
- mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
- mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->flags);
+ drm_printf(p, "crtc timings: clock=%d, "
+ "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
+ "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
+ "flags=0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
+ mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
+ mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->flags);
}
static void
-intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+intel_dump_m_n_config(struct drm_printer *p,
+ const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
-
- drm_dbg_kms(&i915->drm,
- "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- id, lane_count,
- m_n->data_m, m_n->data_n,
- m_n->link_m, m_n->link_n, m_n->tu);
+ drm_printf(p, "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->data_m, m_n->data_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
}
static void
@@ -52,17 +50,7 @@ intel_dump_infoframe(struct drm_i915_private *i915,
}
static void
-intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
- const struct drm_dp_vsc_sdp *vsc)
-{
- struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
-
- drm_dp_vsc_sdp_log(&p, vsc);
-}
-
-static void
-intel_dump_buffer(struct drm_i915_private *i915,
- const char *prefix, const u8 *buf, size_t len)
+intel_dump_buffer(const char *prefix, const u8 *buf, size_t len)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
@@ -130,71 +118,66 @@ const char *intel_output_format_name(enum intel_output_format format)
return output_format_str[format];
}
-static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
+static void intel_dump_plane_state(struct drm_printer *p,
+ const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
if (!fb) {
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
- plane->base.base.id, plane->base.name,
- str_yes_no(plane_state->uapi.visible));
+ drm_printf(p, "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ str_yes_no(plane_state->uapi.visible));
return;
}
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
- plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height, &fb->format->format,
- fb->modifier, str_yes_no(plane_state->uapi.visible));
- drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
+ drm_printf(p, "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height, &fb->format->format,
+ fb->modifier, str_yes_no(plane_state->uapi.visible));
+ drm_printf(p, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
if (plane_state->uapi.visible)
- drm_dbg_kms(&i915->drm,
- "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst));
+ drm_printf(p, "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
}
static void
-ilk_dump_csc(struct drm_i915_private *i915, const char *name,
+ilk_dump_csc(struct drm_i915_private *i915,
+ struct drm_printer *p,
+ const char *name,
const struct intel_csc_matrix *csc)
{
int i;
- drm_dbg_kms(&i915->drm,
- "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name,
- csc->preoff[0], csc->preoff[1], csc->preoff[2]);
+ drm_printf(p, "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->preoff[0], csc->preoff[1], csc->preoff[2]);
for (i = 0; i < 3; i++)
- drm_dbg_kms(&i915->drm,
- "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
- csc->coeff[3 * i + 0],
- csc->coeff[3 * i + 1],
- csc->coeff[3 * i + 2]);
+ drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->coeff[3 * i + 0],
+ csc->coeff[3 * i + 1],
+ csc->coeff[3 * i + 2]);
if (DISPLAY_VER(i915) < 7)
return;
- drm_dbg_kms(&i915->drm,
- "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
- csc->postoff[0], csc->postoff[1], csc->postoff[2]);
+ drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->postoff[0], csc->postoff[1], csc->postoff[2]);
}
static void
-vlv_dump_csc(struct drm_i915_private *i915, const char *name,
+vlv_dump_csc(struct drm_printer *p, const char *name,
const struct intel_csc_matrix *csc)
{
int i;
for (i = 0; i < 3; i++)
- drm_dbg_kms(&i915->drm,
- "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
- csc->coeff[3 * i + 0],
- csc->coeff[3 * i + 1],
- csc->coeff[3 * i + 2]);
+ drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->coeff[3 * i + 0],
+ csc->coeff[3 * i + 1],
+ csc->coeff[3 * i + 2]);
}
void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
@@ -205,85 +188,86 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
+ struct drm_printer p;
char buf[64];
int i;
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n",
- crtc->base.base.id, crtc->base.name,
- str_yes_no(pipe_config->hw.enable), context);
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+
+ drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
+ crtc->base.base.id, crtc->base.name,
+ str_yes_no(pipe_config->hw.enable), context);
if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- drm_dbg_kms(&i915->drm,
- "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n",
- str_yes_no(pipe_config->hw.active),
- buf, pipe_config->output_types,
- intel_output_format_name(pipe_config->output_format),
- intel_output_format_name(pipe_config->sink_format));
-
- drm_dbg_kms(&i915->drm,
- "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
- transcoder_name(pipe_config->cpu_transcoder),
- pipe_config->pipe_bpp, pipe_config->dither);
-
- drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n",
- transcoder_name(pipe_config->mst_master_transcoder));
-
- drm_dbg_kms(&i915->drm,
- "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
- transcoder_name(pipe_config->master_transcoder),
- pipe_config->sync_mode_slaves_mask);
-
- drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n",
- intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
- intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
- pipe_config->bigjoiner_pipes);
-
- drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n",
- str_enabled_disabled(pipe_config->splitter.enable),
- pipe_config->splitter.link_count,
- pipe_config->splitter.pixel_overlap);
+ drm_printf(&p, "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n",
+ str_yes_no(pipe_config->hw.active),
+ buf, pipe_config->output_types,
+ intel_output_format_name(pipe_config->output_format),
+ intel_output_format_name(pipe_config->sink_format));
+
+ drm_printf(&p, "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
+ pipe_config->pipe_bpp, pipe_config->dither);
+
+ drm_printf(&p, "MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
+
+ drm_printf(&p, "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+ transcoder_name(pipe_config->master_transcoder),
+ pipe_config->sync_mode_slaves_mask);
+
+ drm_printf(&p, "bigjoiner: %s, pipes: 0x%x\n",
+ intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
+ intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
+ pipe_config->bigjoiner_pipes);
+
+ drm_printf(&p, "splitter: %s, link count %d, overlap %d\n",
+ str_enabled_disabled(pipe_config->splitter.enable),
+ pipe_config->splitter.link_count,
+ pipe_config->splitter.pixel_overlap);
if (pipe_config->has_pch_encoder)
- intel_dump_m_n_config(pipe_config, "fdi",
+ intel_dump_m_n_config(&p, pipe_config, "fdi",
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);
if (intel_crtc_has_dp_encoder(pipe_config)) {
- intel_dump_m_n_config(pipe_config, "dp m_n",
+ intel_dump_m_n_config(&p, pipe_config, "dp m_n",
pipe_config->lane_count,
&pipe_config->dp_m_n);
- intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ intel_dump_m_n_config(&p, pipe_config, "dp m2_n2",
pipe_config->lane_count,
&pipe_config->dp_m2_n2);
- drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n",
- str_enabled_disabled(pipe_config->fec_enable),
- str_enabled_disabled(pipe_config->enhanced_framing));
-
- drm_dbg_kms(&i915->drm, "sdp split: %s\n",
- str_enabled_disabled(pipe_config->sdp_split_enable));
-
- drm_dbg_kms(&i915->drm, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
- str_enabled_disabled(pipe_config->has_psr),
- str_enabled_disabled(pipe_config->has_psr2),
- str_enabled_disabled(pipe_config->has_panel_replay),
- str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
+ drm_printf(&p, "fec: %s, enhanced framing: %s\n",
+ str_enabled_disabled(pipe_config->fec_enable),
+ str_enabled_disabled(pipe_config->enhanced_framing));
+
+ drm_printf(&p, "sdp split: %s\n",
+ str_enabled_disabled(pipe_config->sdp_split_enable));
+
+ drm_printf(&p, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
+ str_enabled_disabled(pipe_config->has_psr),
+ str_enabled_disabled(pipe_config->has_psr2),
+ str_enabled_disabled(pipe_config->has_panel_replay),
+ str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
}
- drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+ drm_printf(&p, "framestart delay: %d, MSA timing delay: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay);
- drm_dbg_kms(&i915->drm,
- "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
- pipe_config->has_audio, pipe_config->has_infoframe,
- pipe_config->infoframes.enable);
+ drm_printf(&p, "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
+ pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
- drm_dbg_kms(&i915->drm, "GCP: 0x%x\n",
- pipe_config->infoframes.gcp);
+ drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
@@ -301,91 +285,88 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
- intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
+ drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC))
+ drm_dp_as_sdp_log(&p, &pipe_config->infoframes.as_sdp);
if (pipe_config->has_audio)
- intel_dump_buffer(i915, "ELD: ", pipe_config->eld,
+ intel_dump_buffer("ELD: ", pipe_config->eld,
drm_eld_size(pipe_config->eld));
- drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
- str_yes_no(pipe_config->vrr.enable),
- pipe_config->vrr.vmin, pipe_config->vrr.vmax,
- pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
- pipe_config->vrr.flipline,
- intel_vrr_vmin_vblank_start(pipe_config),
- intel_vrr_vmax_vblank_start(pipe_config));
-
- drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.mode));
- drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
- intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode);
- drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
- intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode);
- drm_dbg_kms(&i915->drm,
- "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
- pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
- pipe_config->pixel_rate);
-
- drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n",
- pipe_config->linetime, pipe_config->ips_linetime);
+ drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ str_yes_no(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
+ pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
+ drm_printf(&p, "requested mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.mode));
+ drm_printf(&p, "adjusted mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
+ intel_dump_crtc_timings(&p, &pipe_config->hw.adjusted_mode);
+ drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
+ intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode);
+ drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
+ pipe_config->pixel_rate);
+
+ drm_printf(&p, "linetime: %d, ips linetime: %d\n",
+ pipe_config->linetime, pipe_config->ips_linetime);
if (DISPLAY_VER(i915) >= 9)
- drm_dbg_kms(&i915->drm,
- "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id,
- pipe_config->hw.scaling_filter);
+ drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id,
+ pipe_config->hw.scaling_filter);
if (HAS_GMCH(i915))
- drm_dbg_kms(&i915->drm,
- "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
+ drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
else
- drm_dbg_kms(&i915->drm,
- "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
- DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
- str_enabled_disabled(pipe_config->pch_pfit.enabled),
- str_yes_no(pipe_config->pch_pfit.force_thru));
+ drm_printf(&p, "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
+ DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
+ str_enabled_disabled(pipe_config->pch_pfit.enabled),
+ str_yes_no(pipe_config->pch_pfit.force_thru));
- drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n",
- pipe_config->ips_enabled, pipe_config->double_wide,
- pipe_config->has_drrs);
+ drm_printf(&p, "ips: %i, double wide: %i, drrs: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide,
+ pipe_config->has_drrs);
- intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state);
+ intel_dpll_dump_hw_state(i915, &p, &pipe_config->dpll_hw_state);
if (IS_CHERRYVIEW(i915))
- drm_dbg_kms(&i915->drm,
- "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->cgm_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
+ drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
else
- drm_dbg_kms(&i915->drm,
- "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->csc_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
-
- drm_dbg_kms(&i915->drm, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
- pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
- i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
- pipe_config->pre_csc_lut ?
- drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
- pipe_config->post_csc_lut ?
- drm_color_lut_size(pipe_config->post_csc_lut) : 0);
+ drm_printf(&p, "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
+ drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
+ pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
+ i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ pipe_config->pre_csc_lut ?
+ drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
+ pipe_config->post_csc_lut ?
+ drm_color_lut_size(pipe_config->post_csc_lut) : 0);
if (DISPLAY_VER(i915) >= 11)
- ilk_dump_csc(i915, "output csc", &pipe_config->output_csc);
+ ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);
if (!HAS_GMCH(i915))
- ilk_dump_csc(i915, "pipe csc", &pipe_config->csc);
+ ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
else if (IS_CHERRYVIEW(i915))
- vlv_dump_csc(i915, "cgm csc", &pipe_config->csc);
+ vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
else if (IS_VALLEYVIEW(i915))
- vlv_dump_csc(i915, "wgc csc", &pipe_config->csc);
+ vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
dump_planes:
if (!state)
@@ -393,6 +374,6 @@ dump_planes:
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->pipe == crtc->pipe)
- intel_dump_plane_state(plane_state);
+ intel_dump_plane_state(&p, plane_state);
}
}
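
The conversion above replaces per-call drm_dbg_kms() logging with a single struct drm_printer that is built once and handed to every dump helper. A minimal sketch of the pattern, assuming the usual i915 display headers (the helper name example_dump_timings is hypothetical):

static void example_dump_timings(struct drm_i915_private *i915,
				 const struct drm_display_mode *mode)
{
	struct drm_printer p;

	/* gate all output once instead of inside every helper */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	/* one printer, passed down to every dump helper as &p */
	p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);

	drm_printf(&p, "crtc timings: clock=%d\n", mode->crtc_clock);
}

This also lets helpers such as intel_dump_buffer() drop their device argument entirely, since the printer already carries the output destination.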
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index f8b33999d43f..23a122ee20c9 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -36,12 +36,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
- base = i915_gem_object_get_dma_address(obj, 0);
+ base = plane_state->phys_dma_addr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -511,6 +509,24 @@ static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
+static void wa_16021440873(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 ctl = plane_state->ctl;
+ int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1;
+ enum pipe pipe = plane->pipe;
+
+ ctl &= ~MCURSOR_MODE_MASK;
+ ctl |= MCURSOR_MODE_64_2B;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), ctl);
+
+ intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
+ PIPESRC_HEIGHT(et_y_position));
+}
+
static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -531,7 +547,11 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
plane_state->ctl);
} else {
- i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+ /* Wa_16021440873 */
+ if (crtc_state->enable_psr2_su_region_et)
+ wa_16021440873(plane, crtc_state, plane_state);
+ else
+ i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
}
}
@@ -823,6 +843,28 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.format_mod_supported = intel_cursor_format_mod_supported,
};
+static void intel_cursor_add_size_hints_property(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_mode_config *config = &i915->drm.mode_config;
+ struct drm_plane_size_hint hints[4];
+ int size, max_size, num_hints = 0;
+
+ max_size = min(config->cursor_width, config->cursor_height);
+
+ /* for simplicity only enumerate the supported square+POT sizes */
+ for (size = 64; size <= max_size; size *= 2) {
+ if (drm_WARN_ON(&i915->drm, num_hints >= ARRAY_SIZE(hints)))
+ break;
+
+ hints[num_hints].width = size;
+ hints[num_hints].height = size;
+ num_hints++;
+ }
+
+ drm_plane_add_size_hints_property(&plane->base, hints, num_hints);
+}
+
struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
@@ -881,6 +923,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
+ intel_cursor_add_size_hints_property(cursor);
+
zpos = DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
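
intel_cursor_add_size_hints_property() above enumerates square power-of-two cursor sizes from 64 up to min(cursor_width, cursor_height); with a 256x256 maximum that yields 64x64, 128x128 and 256x256, comfortably inside the four-entry hints array. A standalone sketch of the same walk (hypothetical plain-C check, max_size standing in for the mode_config limits):

#include <stdio.h>

int main(void)
{
	int max_size = 256;	/* stands in for min(cursor_width, cursor_height) */

	/* same square power-of-two walk as the loop above */
	for (int size = 64; size <= max_size; size *= 2)
		printf("%dx%d\n", size, size);

	return 0;
}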
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 64e0f820a789..8e3b13884bb8 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -29,8 +29,11 @@
#define INTEL_CX0_LANE1 BIT(1)
#define INTEL_CX0_BOTH_LANES (INTEL_CX0_LANE1 | INTEL_CX0_LANE0)
-bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
+bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
+
if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
return true;
@@ -46,8 +49,7 @@ static int lane_mask_to_lane(u8 lane_mask)
return ilog2(lane_mask);
}
-static u8 intel_cx0_get_owned_lane_mask(struct drm_i915_private *i915,
- struct intel_encoder *encoder)
+static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -114,16 +116,20 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
}
-static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
- enum port port, int lane)
+static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane)
{
- intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, encoder->port, lane),
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
-static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
+static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
@@ -135,20 +141,22 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
return;
}
- intel_clear_response_ready_flag(i915, port, lane);
+ intel_clear_response_ready_flag(encoder, lane);
}
-static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
+static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
int command, int lane, u32 *val)
{
- enum phy phy = intel_port_to_phy(i915, port);
-
- if (__intel_de_wait_for_register(i915,
- XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_MSGBUS_TIMEOUT_FAST_US,
- XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+
+ if (intel_de_wait_custom(i915,
+ XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_MSGBUS_TIMEOUT_FAST_US,
+ XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
@@ -158,31 +166,33 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
"PHY %c Hardware did not detect a timeout\n",
phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
if (*val & XELPDP_PORT_P2M_ERROR_SET) {
drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
return 0;
}
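
The __intel_de_wait_for_register() to intel_de_wait_custom() substitution repeated through this file keeps the argument order seen in the hunks: device, register, mask, expected value, fast timeout, slow timeout, and an optional pointer receiving the final readout. A hedged usage fragment (i915, reg and MASK come from surrounding context; timeout units assumed to match the old helper, microseconds then milliseconds):

	u32 val;

	/* poll until (reg & MASK) == MASK; on timeout val holds the last readout */
	if (intel_de_wait_custom(i915, reg, MASK, MASK,
				 XELPDP_MSGBUS_TIMEOUT_FAST_US,
				 XELPDP_MSGBUS_TIMEOUT_SLOW, &val))
		drm_dbg_kms(&i915->drm, "timeout, status 0x%x\n", val);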
-static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
+static int __intel_cx0_read_once(struct intel_encoder *encoder,
int lane, u16 addr)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;
@@ -191,7 +201,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
@@ -200,33 +210,34 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
XELPDP_PORT_M2P_COMMAND_READ |
XELPDP_PORT_M2P_ADDRESS(addr));
- ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
if (ack < 0)
return ack;
- intel_clear_response_ready_flag(i915, port, lane);
+ intel_clear_response_ready_flag(encoder, lane);
/*
 * FIXME: Workaround to let the HW settle
 * down and the message bus end up
 * in a known state
 */
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}
-static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
+static u8 __intel_cx0_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
int i, status;
assert_dc_off(i915);
/* 3 tries is assumed to be enough to read successfully */
for (i = 0; i < 3; i++) {
- status = __intel_cx0_read_once(i915, port, lane, addr);
+ status = __intel_cx0_read_once(encoder, lane, addr);
if (status >= 0)
return status;
@@ -238,18 +249,20 @@ static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
return 0;
}
-static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
+static u8 intel_cx0_read(struct intel_encoder *encoder,
u8 lane_mask, u16 addr)
{
int lane = lane_mask_to_lane(lane_mask);
- return __intel_cx0_read(i915, port, lane, addr);
+ return __intel_cx0_read(encoder, lane, addr);
}
-static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
+static int __intel_cx0_write_once(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;
@@ -258,7 +271,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
@@ -274,45 +287,46 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
if (committed) {
- ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
if (ack < 0)
return ack;
} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) &
XELPDP_PORT_P2M_ERROR_SET)) {
drm_dbg_kms(&i915->drm,
"PHY %c Error occurred during write command.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
- intel_clear_response_ready_flag(i915, port, lane);
+ intel_clear_response_ready_flag(encoder, lane);
/*
 * FIXME: Workaround to let the HW settle
 * down and the message bus end up
 * in a known state
 */
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return 0;
}
-static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
+static void __intel_cx0_write(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
int i, status;
assert_dc_off(i915);
/* 3 tries is assumed to be enough to write successfully */
for (i = 0; i < 3; i++) {
- status = __intel_cx0_write_once(i915, port, lane, addr, data, committed);
+ status = __intel_cx0_write_once(encoder, lane, addr, data, committed);
if (status == 0)
return;
@@ -322,63 +336,66 @@ static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
"PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}
-static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
+static void intel_cx0_write(struct intel_encoder *encoder,
u8 lane_mask, u16 addr, u8 data, bool committed)
{
int lane;
for_each_cx0_lane_in_mask(lane_mask, lane)
- __intel_cx0_write(i915, port, lane, addr, data, committed);
+ __intel_cx0_write(encoder, lane, addr, data, committed);
}
-static void intel_c20_sram_write(struct drm_i915_private *i915, enum port port,
+static void intel_c20_sram_write(struct intel_encoder *encoder,
int lane, u16 addr, u16 data)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
assert_dc_off(i915);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
}
-static u16 intel_c20_sram_read(struct drm_i915_private *i915, enum port port,
+static u16 intel_c20_sram_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u16 val;
assert_dc_off(i915);
- intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);
+ intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);
- val = intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_H);
+ val = intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_H);
val <<= 8;
- val |= intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_L);
+ val |= intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_L);
return val;
}
-static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+static void __intel_cx0_rmw(struct intel_encoder *encoder,
int lane, u16 addr, u8 clear, u8 set, bool committed)
{
u8 old, val;
- old = __intel_cx0_read(i915, port, lane, addr);
+ old = __intel_cx0_read(encoder, lane, addr);
val = (old & ~clear) | set;
if (val != old)
- __intel_cx0_write(i915, port, lane, addr, val, committed);
+ __intel_cx0_write(encoder, lane, addr, val, committed);
}
-static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+static void intel_cx0_rmw(struct intel_encoder *encoder,
u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
u8 lane;
for_each_cx0_lane_in_mask(lane_mask, lane)
- __intel_cx0_rmw(i915, port, lane, addr, clear, set, committed);
+ __intel_cx0_rmw(encoder, lane, addr, clear, set, committed);
}
static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state)
@@ -414,7 +431,6 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
u8 owned_lane_mask;
intel_wakeref_t wakeref;
int n_entries, ln;
@@ -423,7 +439,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
if (intel_tc_port_in_tbt_alt_mode(dig_port))
return;
- owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -433,14 +449,14 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
return;
}
- if (intel_is_c10phy(i915, phy)) {
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
+ if (intel_encoder_is_c10phy(encoder)) {
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CMN(3),
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CMN(3),
C10_CMN3_TXVBOOST_MASK,
C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)),
MB_WRITE_UNCOMMITTED);
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_TX(1),
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_TX(1),
C10_TX1_TERMCTL_MASK,
C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)),
MB_WRITE_COMMITTED);
@@ -455,27 +471,27 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
if (!(lane_mask & owned_lane_mask))
continue;
- intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor),
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing),
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor),
MB_WRITE_COMMITTED);
}
/* Write Override enables in 0xD71 */
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_OVRD,
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_OVRD,
0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2,
MB_WRITE_COMMITTED);
- if (intel_is_c10phy(i915, phy))
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
intel_cx0_phy_transaction_end(encoder, wakeref);
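
The recurring theme of this file's changes is visible above: low-level helpers now take the struct intel_encoder directly and derive the device and PHY themselves, instead of every caller threading i915/port pairs through. A minimal sketch of the derivation (hypothetical helper name; encoder->port feeds the register macros the same way where needed):

static void cx0_helper_sketch(struct intel_encoder *encoder, int lane)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	enum phy phy = intel_encoder_to_phy(encoder);

	drm_dbg_kms(&i915->drm, "PHY %c: lane %d\n", phy_name(phy), lane);
}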
@@ -1811,7 +1827,7 @@ static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_cx0pll_state *pll_state = &crtc_state->cx0pll_state;
+ struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll;
int i;
if (intel_crtc_has_dp_encoder(crtc_state)) {
@@ -1843,7 +1859,7 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->cx0pll_state.c10 = *tables[i];
+ crtc_state->dpll_hw_state.cx0pll.c10 = *tables[i];
intel_c10pll_update_pll(crtc_state, encoder);
return 0;
@@ -1856,7 +1872,6 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c10pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 lane = INTEL_CX0_LANE0;
intel_wakeref_t wakeref;
int i;
@@ -1867,16 +1882,15 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
* According to C10 VDR Register programming Sequence we need
* to do this to read PHY internal registers from MsgBus.
*/
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, lane, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
- pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane,
- PHY_C10_VDR_PLL(i));
+ pll_state->pll[i] = intel_cx0_read(encoder, lane, PHY_C10_VDR_PLL(i));
- pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
- pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
+ pll_state->cmn = intel_cx0_read(encoder, lane, PHY_C10_VDR_CMN(0));
+ pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0));
intel_cx0_phy_transaction_end(encoder, wakeref);
}
@@ -1885,31 +1899,31 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_c10pll_state *pll_state = &crtc_state->cx0pll_state.c10;
+ const struct intel_c10pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c10;
int i;
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
/* Custom width needs to be programmed to 0 for both the phy lanes */
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
/* Program the pll values only for the master lane */
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
- intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
pll_state->pll[i],
(i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED);
- intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
- intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
}
@@ -2037,10 +2051,8 @@ static int intel_c20_phy_check_hdmi_link_rate(int clock)
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock)
{
struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi);
- struct drm_i915_private *i915 = intel_hdmi_to_i915(hdmi);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(&dig_port->base))
return intel_c10_phy_check_hdmi_link_rate(clock);
return intel_c20_phy_check_hdmi_link_rate(clock);
}
@@ -2067,7 +2079,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
/* try computed C20 HDMI tables before using consolidated tables */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock,
- &crtc_state->cx0pll_state.c20) == 0)
+ &crtc_state->dpll_hw_state.cx0pll.c20) == 0)
return 0;
}
@@ -2077,7 +2089,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->cx0pll_state.c20 = *tables[i];
+ crtc_state->dpll_hw_state.cx0pll.c20 = *tables[i];
return 0;
}
}
@@ -2088,10 +2100,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
return intel_c10pll_calc_state(crtc_state, encoder);
return intel_c20pll_calc_state(crtc_state, encoder);
}
@@ -2149,7 +2158,6 @@ static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c20pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool cntx;
intel_wakeref_t wakeref;
int i;
@@ -2157,25 +2165,25 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
wakeref = intel_cx0_phy_transaction_begin(encoder);
/* 1. Read current context selection */
- cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
/* Read Tx configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
if (cntx)
- pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_TX_CNTX_CFG(i));
else
- pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_TX_CNTX_CFG(i));
}
/* Read common configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
if (cntx)
- pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_CMN_CNTX_CFG(i));
else
- pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_CMN_CNTX_CFG(i));
}
@@ -2183,20 +2191,20 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
/* MPLLB configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
- pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLB_CNTX_CFG(i));
else
- pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLB_CNTX_CFG(i));
}
} else {
/* MPLLA configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
- pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i));
else
- pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i));
}
}
@@ -2327,7 +2335,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20;
+ const struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20;
bool dp = false;
int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
u32 clock = crtc_state->port_clock;
@@ -2338,7 +2346,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
dp = true;
/* 1. Read current context selection */
- cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
/*
* 2. If there is a protocol switch from HDMI to DP or vice versa, clear
@@ -2347,7 +2355,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
*/
if (intel_c20_protocol_switch_valid(encoder)) {
for (i = 0; i < 4; i++)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
usleep_range(4000, 4100);
}
@@ -2355,63 +2363,63 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
/* 3.1 Tx configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
}
/* 3.2 common configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
}
/* 3.3 mpllb or mplla configuration */
if (intel_c20phy_use_mpllb(pll_state)) {
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLB_CNTX_CFG(i),
pll_state->mpllb[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLB_CNTX_CFG(i),
pll_state->mpllb[i]);
}
} else {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
}
}
/* 4. Program custom width to match the link protocol */
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_WIDTH,
PHY_C20_CUSTOM_WIDTH_MASK,
PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
if (dp) {
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
MB_WRITE_COMMITTED);
} else {
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
is_hdmi_frl(clock) ? BIT(7) : 0,
MB_WRITE_COMMITTED);
- intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
+ intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
intel_c20_get_hdmi_rate(clock),
MB_WRITE_COMMITTED);
}
@@ -2420,7 +2428,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
* 7. Write Vendor specific registers to toggle context setting to load
* the updated programming toggle context bit
*/
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
}
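
intel_c20_pll_program() above works against the double-buffered C20 context: bit 0 of PHY_C20_VDR_CUSTOM_SERDES_RATE selects the live bank, the idle bank (A when cntx is set, B otherwise) is written through the SRAM helpers while the PHY keeps running from the active one, and the final rmw flips the select bit so the fresh programming is loaded. In outline (fragment only; encoder and pll_state as in the function above, showing a single tx word for brevity):

	bool cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0,
				   PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);

	/* write the bank the PHY is NOT currently running from */
	if (cntx)
		intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
				     PHY_C20_A_TX_CNTX_CFG(0), pll_state->tx[0]);
	else
		intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
				     PHY_C20_B_TX_CNTX_CFG(0), pll_state->tx[0]);

	/* toggle the context select bit to make the new bank live */
	intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE,
		      BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);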
@@ -2476,9 +2484,9 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA */
if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
- val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
+ val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
else
- val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+ val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
@@ -2508,11 +2516,12 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
return val;
}
-static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
- enum port port,
+static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
u8 lane_mask, u8 state)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port);
int lane;
@@ -2528,7 +2537,7 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
}
intel_de_rmw(i915, buf_ctl2_reg,
@@ -2536,15 +2545,18 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
- if (__intel_de_wait_for_register(i915, buf_ctl2_reg,
- intel_cx0_get_powerdown_update(lane_mask), 0,
- XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, buf_ctl2_reg,
+ intel_cx0_get_powerdown_update(lane_mask), 0,
+ XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
}
-static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
+static void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
XELPDP_POWER_STATE_READY_MASK,
XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
@@ -2577,13 +2589,13 @@ static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask)
return val;
}
-static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
- struct intel_encoder *encoder,
+static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
bool lane_reversal)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(i915, port);
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0;
u32 lane_pipe_reset = owned_lane_mask == INTEL_CX0_BOTH_LANES
? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
@@ -2593,19 +2605,19 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(i915, port),
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port),
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset,
lane_pipe_reset);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(i915, port),
- lane_phy_current_status, lane_phy_current_status,
- XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL2(i915, port),
+ lane_phy_current_status, lane_phy_current_status,
+ XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
@@ -2613,16 +2625,16 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
- intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
- intel_cx0_get_pclk_refclk_ack(lane_mask),
- XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
+ intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
+ intel_cx0_get_pclk_refclk_ack(lane_mask),
+ XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
- intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
CX0_P2_STATE_RESET);
- intel_cx0_setup_powerdown(i915, port);
+ intel_cx0_setup_powerdown(encoder);
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0);
@@ -2640,11 +2652,10 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
int i;
u8 disables;
bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder));
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
- enum port port = encoder->port;
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
- if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
- intel_cx0_rmw(i915, port, owned_lane_mask,
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask,
PHY_C10_VDR_CONTROL(1), 0,
C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
@@ -2666,14 +2677,14 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
if (!(owned_lane_mask & lane_mask))
continue;
- intel_cx0_rmw(i915, port, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
CONTROL2_DISABLE_SINGLE_TX,
disables & BIT(i) ? CONTROL2_DISABLE_SINGLE_TX : 0,
MB_WRITE_COMMITTED);
}
- if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
- intel_cx0_rmw(i915, port, owned_lane_mask,
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask,
PHY_C10_VDR_CONTROL(1), 0,
C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
@@ -2705,7 +2716,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
@@ -2719,13 +2730,13 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
/* 2. Bring PHY out of reset. */
- intel_cx0_phy_lane_reset(i915, encoder, lane_reversal);
+ intel_cx0_phy_lane_reset(encoder, lane_reversal);
/*
* 3. Change Phy power state to Ready.
* TODO: For DP alt mode use only one lane.
*/
- intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
CX0_P2_STATE_READY);
/*
@@ -2735,7 +2746,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
*/
/* 5. Program PHY internal PLL internal registers. */
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
intel_c10_pll_program(i915, crtc_state, encoder);
else
intel_c20_pll_program(i915, crtc_state, encoder);
@@ -2767,10 +2778,10 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
- intel_cx0_get_pclk_pll_ack(maxpclk_lane),
- XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_pll_ack(maxpclk_lane),
+ XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
@@ -2831,7 +2842,7 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val = 0;
/*
@@ -2858,10 +2869,10 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- XELPDP_TBT_CLOCK_ACK,
- XELPDP_TBT_CLOCK_ACK,
- 100, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ XELPDP_TBT_CLOCK_ACK,
+ XELPDP_TBT_CLOCK_ACK,
+ 100, 0, NULL))
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
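Note: the __intel_de_wait_for_register() -> intel_de_wait_custom() rename recurring in these hunks keeps the argument list identical; only the name changes at the call sites. The new name is defined in the intel_de.h portion of this diff further down, as a macro over the following helper (copied from that hunk for reference):

static inline int
__intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
		       u32 mask, u32 value,
		       unsigned int fast_timeout_us,
		       unsigned int slow_timeout_ms, u32 *out_value);

The macro resolves its first argument to struct intel_display and additionally brackets the wait with intel_dmc_wl_get()/intel_dmc_wl_put(), which is why the rename is not purely cosmetic.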
@@ -2892,12 +2903,12 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
static void intel_cx0pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- bool is_c10 = intel_is_c10phy(i915, phy);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ bool is_c10 = intel_encoder_is_c10phy(encoder);
intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
/* 1. Change owned PHY lane power to Disable state. */
- intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
is_c10 ? CX0_P2PG_STATE_DISABLE :
CX0_P4PG_STATE_DISABLE);
@@ -2920,10 +2931,10 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
- intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
- XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
+ intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
+ XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
@@ -2944,7 +2955,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
/*
* 1. Follow the Display Voltage Frequency Switching Sequence Before
@@ -2958,8 +2969,8 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
@@ -3014,7 +3025,7 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
struct intel_c10pll_state *mpllb_hw_state)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
+ const struct intel_c10pll_state *mpllb_sw_state = &state->dpll_hw_state.cx0pll.c10;
int i;
if (intel_crtc_needs_fastset(state))
@@ -3043,10 +3054,7 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_cx0pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
else
intel_c20pll_readout_hw_state(encoder, &pll_state->c20);
@@ -3055,10 +3063,7 @@ void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
@@ -3070,7 +3075,7 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
struct intel_c20pll_state *mpll_hw_state)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
+ const struct intel_c20pll_state *mpll_sw_state = &state->dpll_hw_state.cx0pll.c20;
bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state);
bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state);
int i;
@@ -3124,7 +3129,6 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
struct intel_cx0pll_state mpll_hw_state = {};
- enum phy phy;
if (DISPLAY_VER(i915) < 14)
return;
@@ -3138,14 +3142,13 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
return;
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
- phy = intel_port_to_phy(i915, encoder->port);
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
else
intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index c6682677253a..3e03af3e006c 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -11,7 +11,6 @@
#include <linux/bits.h>
enum icl_port_dpll_id;
-enum phy;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_c10pll_state;
@@ -22,7 +21,7 @@ struct intel_crtc_state;
struct intel_encoder;
struct intel_hdmi;
-bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_mtl_pll_disable(struct intel_encoder *encoder);
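Note: this header change is part of a wider conversion from (i915, port/phy) pairs to encoder-based predicates. A hypothetical sketch of the new helper, assuming it folds the old two-step lookup into one call via intel_encoder_to_phy() (the real body lives outside this diff, and the MTL PHY split below is an assumption):

bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	enum phy phy = intel_encoder_to_phy(encoder);

	/* Assumed rule: on MTL, PHY A/B are C10, PHY C onwards are C20. */
	return IS_METEORLAKE(i915) && phy < PHY_C;
}

Dropping the enum phy forward declaration above only works because every remaining user of this header now goes through the encoder.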
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index c587a8efeafc..3c3fc53376ce 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -200,10 +200,10 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
port_name(port));
}
-static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
- enum port port)
+static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
int timeout_us;
int ret;
@@ -218,7 +218,7 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
} else if (IS_DG2(dev_priv)) {
timeout_us = 1200;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_encoder_is_tc(encoder))
timeout_us = 3000;
else
timeout_us = 1000;
@@ -331,7 +331,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
/* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
intel_dp->DP = dig_port->saved_port_bits |
@@ -345,7 +344,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
}
- if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
+ if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -632,6 +631,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -662,10 +662,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
- if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
+ if (intel_has_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- drm_dbg_kms(&dev_priv->drm,
- "Quirk Increase DDI disabled time\n");
+ drm_dbg_kms(display->drm, "Quirk Increase DDI disabled time\n");
/* Quirk time at 100ms for reliable operation */
msleep(100);
}
@@ -895,7 +894,6 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
/*
* ICL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -914,7 +912,7 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
return intel_display_power_aux_io_domain(i915, dig_port->aux_ch);
else if (DISPLAY_VER(i915) < 14 &&
(intel_crtc_has_dp_encoder(crtc_state) ||
- intel_phy_is_tc(i915, phy)))
+ intel_encoder_is_tc(&dig_port->base)))
return intel_aux_power_domain(dig_port);
else
return POWER_DOMAIN_INVALID;
@@ -984,7 +982,7 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
if (cpu_transcoder == TRANSCODER_EDP)
@@ -1113,7 +1111,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
u32 val;
@@ -1176,7 +1174,7 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
int ln;
@@ -1227,7 +1225,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1328,7 +1326,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1526,7 +1524,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1540,7 +1538,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1549,7 +1547,7 @@ static void adls_ddi_disable_clock(struct intel_encoder *encoder)
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1558,7 +1556,7 @@ static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
@@ -1570,7 +1568,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1584,7 +1582,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1593,7 +1591,7 @@ static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1602,7 +1600,7 @@ static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
@@ -1614,7 +1612,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1637,7 +1635,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1646,7 +1644,7 @@ static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1655,7 +1653,7 @@ static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
enum intel_dpll_id id;
u32 val;
@@ -1680,7 +1678,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1694,7 +1692,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1703,7 +1701,7 @@ static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1712,7 +1710,7 @@ static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
@@ -1767,7 +1765,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
if (drm_WARN_ON(&i915->drm, !pll))
@@ -1787,7 +1785,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
mutex_lock(&i915->display.dpll.lock);
@@ -1803,7 +1801,7 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
u32 tmp;
@@ -1820,7 +1818,7 @@ static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
enum intel_dpll_id id;
u32 tmp;
@@ -2086,12 +2084,11 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
u32 ln0, ln1, pin_assignment;
u8 width;
- if (!intel_phy_is_tc(dev_priv, phy) ||
+ if (!intel_encoder_is_tc(&dig_port->base) ||
intel_tc_port_in_tbt_alt_mode(dig_port))
return;
@@ -2327,9 +2324,9 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- if (intel_phy_is_combo(i915, phy)) {
+ if (intel_encoder_is_combo(encoder)) {
+ enum phy phy = intel_encoder_to_phy(encoder);
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
@@ -2339,10 +2336,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
}
}
-/* Splitter enable for eDP MSO is limited to certain pipes. */
+/*
+ * Splitter enable for eDP MSO is limited to certain pipes, on certain
+ * platforms.
+ */
static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
{
- if (IS_ALDERLAKE_P(i915))
+ if (DISPLAY_VER(i915) > 20)
+ return ~0;
+ else if (IS_ALDERLAKE_P(i915))
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
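Note: the splitter hunk above turns a two-way platform check into three tiers: display version > 20 allows every pipe (~0), ADL-P allows A and B, everything else only pipe A. A standalone toy model of how such a mask gates the eDP MSO splitter per pipe (plain C, not driver code):

#include <stdio.h>

#define BIT(n) (1u << (n))

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

/* Toy stand-in for intel_ddi_splitter_pipe_mask(); tier 0 = older
 * platforms, 1 = ADL-P, 2 = display version > 20. */
static unsigned int splitter_pipe_mask(int tier)
{
	if (tier == 2)
		return ~0u;
	else if (tier == 1)
		return BIT(PIPE_A) | BIT(PIPE_B);
	else
		return BIT(PIPE_A);
}

int main(void)
{
	for (int tier = 0; tier <= 2; tier++)
		for (int p = PIPE_A; p <= PIPE_D; p++)
			printf("tier %d pipe %c: MSO splitter %s\n", tier,
			       'A' + p,
			       splitter_pipe_mask(tier) & BIT(p) ?
			       "allowed" : "blocked");
	return 0;
}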
@@ -2812,15 +2814,14 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (HAS_DP20(dev_priv)) {
+ if (HAS_DP20(dev_priv))
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
- if (crtc_state->has_panel_replay)
- drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
- DP_PANEL_REPLAY_ENABLE);
- }
+
+ /* Panel replay has to be enabled in sink dpcd before link training. */
+ if (crtc_state->has_panel_replay)
+ intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
if (DISPLAY_VER(dev_priv) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -3095,39 +3096,48 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void intel_ddi_post_disable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *slave_crtc;
+ struct intel_crtc *pipe_crtc;
- if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
- intel_crtc_vblank_off(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_off(old_pipe_crtc_state);
+ }
- intel_disable_transcoder(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
- intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_ddi_disable_transcoder_func(old_crtc_state);
- intel_dsc_disable(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_dsc_disable(old_pipe_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9)
- skl_scaler_disable(old_crtc_state);
+ skl_scaler_disable(old_pipe_crtc_state);
else
- ilk_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_pipe_crtc_state);
}
+}
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) {
- const struct intel_crtc_state *old_slave_crtc_state =
- intel_atomic_get_old_crtc_state(state, slave_crtc);
-
- intel_crtc_vblank_off(old_slave_crtc_state);
-
- intel_dsc_disable(old_slave_crtc_state);
- skl_scaler_disable(old_slave_crtc_state);
- }
+static void intel_ddi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_post_disable_hdmi_or_sst(state, encoder, old_crtc_state,
+ old_conn_state);
/*
* When called from DP MST code:
@@ -3155,14 +3165,11 @@ static void intel_ddi_post_pll_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- bool is_tc_port = intel_phy_is_tc(i915, phy);
main_link_aux_power_domain_put(dig_port, old_crtc_state);
- if (is_tc_port)
+ if (intel_encoder_is_tc(encoder))
intel_tc_port_put_link(dig_port);
}
@@ -3263,7 +3270,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
u32 buf_ctl;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
@@ -3347,14 +3353,14 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- } else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
+ } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
- intel_wait_ddi_buf_active(dev_priv, port);
+ intel_wait_ddi_buf_active(encoder);
}
static void intel_enable_ddi(struct intel_atomic_state *state,
@@ -3362,10 +3368,10 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *pipe_crtc;
- if (!intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_ddi_enable_transcoder_func(encoder, crtc_state);
/* Enable/Disable DP2.0 SDP split config before transcoder */
intel_audio_sdp_split_update(crtc_state);
@@ -3374,7 +3380,13 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
- intel_crtc_vblank_on(crtc_state);
+ for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_on(pipe_crtc_state);
+ }
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
@@ -3470,19 +3482,17 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc_state *crtc_state =
+ const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_crtc *slave_crtc;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
- if (DISPLAY_VER(i915) >= 14 || !intel_phy_is_tc(i915, phy))
+ if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
return;
- intel_update_active_dpll(state, crtc, encoder);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(crtc_state))
- intel_update_active_dpll(state, slave_crtc, encoder);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(crtc_state))
+ intel_update_active_dpll(state, pipe_crtc, encoder);
}
static void
@@ -3493,8 +3503,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+ bool is_tc_port = intel_encoder_is_tc(encoder);
if (is_tc_port) {
struct intel_crtc *master_crtc =
@@ -3513,14 +3522,14 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_ddi_phy_set_lane_optim_mask(encoder,
- crtc_state->lane_lat_optim_mask);
+ bxt_dpio_phy_set_lane_optim_mask(encoder,
+ crtc_state->lane_lat_optim_mask);
}
static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
int ln;
for (ln = 0; ln < 2; ln++)
@@ -3574,7 +3583,7 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
/* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
- intel_wait_ddi_buf_active(dev_priv, port);
+ intel_wait_ddi_buf_active(encoder);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
@@ -3624,7 +3633,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
- intel_wait_ddi_buf_active(dev_priv, port);
+ intel_wait_ddi_buf_active(encoder);
}
static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
@@ -3681,7 +3690,7 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
if (intel_de_wait_for_set(dev_priv,
dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_IDLE_DONE, 1))
+ DP_TP_STATUS_IDLE_DONE, 2))
drm_err(&dev_priv->drm,
"Timed out waiting for DP idle patterns\n");
}
@@ -3946,7 +3955,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
+ bxt_dpio_phy_get_lane_lat_optim_mask(encoder);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -3972,6 +3981,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
+ intel_read_dp_sdp(encoder, pipe_config, DP_SDP_ADAPTIVE_SYNC);
intel_audio_codec_get_config(encoder, pipe_config);
}
@@ -4006,8 +4016,8 @@ static void mtl_ddi_get_config(struct intel_encoder *encoder,
if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
} else {
- intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state);
- crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
+ intel_cx0pll_readout_hw_state(encoder, &crtc_state->dpll_hw_state.cx0pll);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
}
intel_ddi_get_config(encoder, crtc_state);
@@ -4016,8 +4026,8 @@ static void mtl_ddi_get_config(struct intel_encoder *encoder,
static void dg2_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- intel_mpllb_readout_hw_state(encoder, &crtc_state->mpllb_state);
- crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->mpllb_state);
+ intel_mpllb_readout_hw_state(encoder, &crtc_state->dpll_hw_state.mpllb);
+ crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->dpll_hw_state.mpllb);
intel_ddi_get_config(encoder, crtc_state);
}
@@ -4144,10 +4154,7 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
static void intel_ddi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(encoder))
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
crtc_state);
@@ -4159,10 +4166,9 @@ static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
bool fastset = true;
- if (intel_phy_is_tc(i915, phy)) {
+ if (intel_encoder_is_tc(encoder)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
@@ -4226,7 +4232,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -4256,7 +4262,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
+ /*
+ * FIXME the modeset sequence is currently wrong and
+ * can't deal with bigjoiner + port sync at the same time.
+ */
return crtc_state1->hw.active && crtc_state2->hw.active &&
+ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&
@@ -4348,10 +4359,9 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_dp_encoder_flush_work(encoder);
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(&dig_port->base))
intel_tc_port_cleanup(dig_port);
intel_display_power_flush_work(i915);
@@ -4362,16 +4372,14 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
static void intel_ddi_encoder_reset(struct drm_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_dp->reset_link_params = true;
intel_pps_encoder_reset(intel_dp);
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(&dig_port->base))
intel_tc_port_init_mode(dig_port);
}
@@ -4538,11 +4546,9 @@ static enum intel_hotplug_state
intel_ddi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- bool is_tc = intel_phy_is_tc(i915, phy);
+ bool is_tc = intel_encoder_is_tc(encoder);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
@@ -4824,10 +4830,7 @@ static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- return init_dp || intel_phy_is_tc(i915, phy);
+ return init_dp || intel_encoder_is_tc(encoder);
}
static bool assert_has_icl_dsi(struct drm_i915_private *i915)
@@ -5071,17 +5074,17 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
} else if (IS_DG2(dev_priv)) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_phy_is_combo(dev_priv, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_phy_is_combo(dev_priv, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- encoder->set_signal_levels = bxt_ddi_phy_set_signal_levels;
+ encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels;
} else {
encoder->set_signal_levels = hsw_set_signal_levels;
}
@@ -5126,7 +5129,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
goto err;
}
- if (intel_phy_is_tc(dev_priv, phy)) {
+ if (intel_encoder_is_tc(encoder)) {
bool is_legacy =
!intel_bios_encoder_supports_typec_usb(devdata) &&
!intel_bios_encoder_supports_tbt(devdata);
@@ -5155,7 +5158,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_encoder_is_tc(encoder))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
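Note: across intel_ddi.c nearly every local enum phy / enum tc_port now comes from an encoder-based helper. Their likely shape, assuming they simply wrap the old phy-based checks that the removed lines used (the actual bodies are outside this diff):

bool intel_encoder_is_tc(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
}

bool intel_encoder_is_combo(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder));
}

The payoff is visible in hunks like intel_ddi_power_up_lanes(): the phy is now looked up only inside the branch that actually needs it.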
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index de809e2d9cac..4d21ce734343 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1691,14 +1691,11 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000)
return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy)))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_encoder_is_c10phy(encoder)))
return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
- else if (!intel_is_c10phy(i915, phy))
+ else if (!intel_encoder_is_c10phy(encoder))
return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
else
return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
@@ -1707,14 +1704,13 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
if (DISPLAY_VER(i915) >= 14) {
encoder->get_buf_trans = mtl_get_cx0_buf_trans;
} else if (IS_DG2(i915)) {
encoder->get_buf_trans = dg2_get_snps_buf_trans;
} else if (IS_ALDERLAKE_P(i915)) {
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = adlp_get_combo_buf_trans;
else
encoder->get_buf_trans = adlp_get_dkl_buf_trans;
@@ -1725,16 +1721,16 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
} else if (IS_DG1(i915)) {
encoder->get_buf_trans = dg1_get_combo_buf_trans;
} else if (DISPLAY_VER(i915) >= 12) {
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = tgl_get_combo_buf_trans;
else
encoder->get_buf_trans = tgl_get_dkl_buf_trans;
} else if (DISPLAY_VER(i915) == 11) {
- if (IS_PLATFORM(i915, INTEL_JASPERLAKE))
+ if (IS_JASPERLAKE(i915))
encoder->get_buf_trans = jsl_get_combo_buf_trans;
- else if (IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
+ else if (IS_ELKHARTLAKE(i915))
encoder->get_buf_trans = ehl_get_combo_buf_trans;
- else if (intel_phy_is_combo(i915, phy))
+ else if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = icl_get_combo_buf_trans;
else
encoder->get_buf_trans = icl_get_mg_buf_trans;
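Note: the IS_PLATFORM(i915, INTEL_JASPERLAKE/ELKHARTLAKE) -> IS_JASPERLAKE()/IS_ELKHARTLAKE() swap is presumably just convenience sugar; i915_drv.h is not part of this diff, but the definitions are assumed to be:

#define IS_JASPERLAKE(i915)	IS_PLATFORM(i915, INTEL_JASPERLAKE)
#define IS_ELKHARTLAKE(i915)	IS_PLATFORM(i915, INTEL_ELKHARTLAKE)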
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 42552d8c151e..e881bfeafb47 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -10,80 +10,185 @@
#include "i915_trace.h"
#include "intel_uncore.h"
+static inline struct intel_uncore *__to_uncore(struct intel_display *display)
+{
+ return &to_i915(display->drm)->uncore;
+}
+
static inline u32
-intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read(struct intel_display *display, i915_reg_t reg)
{
- return intel_uncore_read(&i915->uncore, reg);
+ u32 val;
+
+ intel_dmc_wl_get(display, reg);
+
+ val = intel_uncore_read(__to_uncore(display), reg);
+
+ intel_dmc_wl_put(display, reg);
+
+ return val;
}
+#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)
static inline u8
-intel_de_read8(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read8(struct intel_display *display, i915_reg_t reg)
{
- return intel_uncore_read8(&i915->uncore, reg);
+ u8 val;
+
+ intel_dmc_wl_get(display, reg);
+
+ val = intel_uncore_read8(__to_uncore(display), reg);
+
+ intel_dmc_wl_put(display, reg);
+
+ return val;
}
+#define intel_de_read8(p,...) __intel_de_read8(__to_intel_display(p), __VA_ARGS__)
static inline u64
-intel_de_read64_2x32(struct drm_i915_private *i915,
- i915_reg_t lower_reg, i915_reg_t upper_reg)
+__intel_de_read64_2x32(struct intel_display *display,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
{
- return intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg);
+ u64 val;
+
+ intel_dmc_wl_get(display, lower_reg);
+ intel_dmc_wl_get(display, upper_reg);
+
+ val = intel_uncore_read64_2x32(__to_uncore(display), lower_reg,
+ upper_reg);
+
+ intel_dmc_wl_put(display, upper_reg);
+ intel_dmc_wl_put(display, lower_reg);
+
+ return val;
}
+#define intel_de_read64_2x32(p,...) __intel_de_read64_2x32(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
{
- intel_uncore_posting_read(&i915->uncore, reg);
+ intel_dmc_wl_get(display, reg);
+
+ intel_uncore_posting_read(__to_uncore(display), reg);
+
+ intel_dmc_wl_put(display, reg);
}
+#define intel_de_posting_read(p,...) __intel_de_posting_read(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+__intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
{
- intel_uncore_write(&i915->uncore, reg, val);
+ intel_dmc_wl_get(display, reg);
+
+ intel_uncore_write(__to_uncore(display), reg, val);
+
+ intel_dmc_wl_put(display, reg);
}
+#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__)
static inline u32
-intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
+____intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
+ u32 clear, u32 set)
{
- return intel_uncore_rmw(&i915->uncore, reg, clear, set);
+ return intel_uncore_rmw(__to_uncore(display), reg, clear, set);
}
+#define __intel_de_rmw_nowl(p,...) ____intel_de_rmw_nowl(__to_intel_display(p), __VA_ARGS__)
+
+static inline u32
+__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
+ u32 set)
+{
+ u32 val;
+
+ intel_dmc_wl_get(display, reg);
+
+ val = __intel_de_rmw_nowl(display, reg, clear, set);
+
+ intel_dmc_wl_put(display, reg);
+
+ return val;
+}
+#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+____intel_de_wait_for_register_nowl(struct intel_display *display,
+ i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
- return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
+ return intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, timeout);
}
+#define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_register_fw(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+__intel_de_wait(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
- return intel_wait_for_register_fw(&i915->uncore, reg, mask, value, timeout);
+ int ret;
+
+ intel_dmc_wl_get(display, reg);
+
+ ret = __intel_de_wait_for_register_nowl(display, reg, mask, value,
+ timeout);
+
+ intel_dmc_wl_put(display, reg);
+
+ return ret;
}
+#define intel_de_wait(p,...) __intel_de_wait(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms, u32 *out_value)
+__intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
- return __intel_wait_for_register(&i915->uncore, reg, mask, value,
- fast_timeout_us, slow_timeout_ms, out_value);
+ int ret;
+
+ intel_dmc_wl_get(display, reg);
+
+ ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask,
+ value, timeout);
+
+ intel_dmc_wl_put(display, reg);
+
+ return ret;
}
+#define intel_de_wait_fw(p,...) __intel_de_wait_fw(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+__intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms, u32 *out_value)
{
- return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
+ int ret;
+
+ intel_dmc_wl_get(display, reg);
+
+ ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value,
+ fast_timeout_us, slow_timeout_ms, out_value);
+
+ intel_dmc_wl_put(display, reg);
+
+ return ret;
}
+#define intel_de_wait_custom(p,...) __intel_de_wait_custom(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
+__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout)
{
- return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
+ return intel_de_wait(display, reg, mask, mask, timeout);
+}
+#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__)
+
+static inline int
+__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait(display, reg, mask, 0, timeout);
}
+#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__)
/*
* Unlocked mmio-accessors, think carefully before using these.
@@ -94,33 +199,38 @@ intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
* a more localised lock guarding all access to that bank of registers.
*/
static inline u32
-intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
{
u32 val;
- val = intel_uncore_read_fw(&i915->uncore, reg);
+ val = intel_uncore_read_fw(__to_uncore(display), reg);
trace_i915_reg_rw(false, reg, val, sizeof(val), true);
return val;
}
+#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
{
trace_i915_reg_rw(true, reg, val, sizeof(val), true);
- intel_uncore_write_fw(&i915->uncore, reg, val);
+ intel_uncore_write_fw(__to_uncore(display), reg, val);
}
+#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
static inline u32
-intel_de_read_notrace(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
{
- return intel_uncore_read_notrace(&i915->uncore, reg);
+ return intel_uncore_read_notrace(__to_uncore(display), reg);
}
+#define intel_de_read_notrace(p,...) __intel_de_read_notrace(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_write_notrace(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+__intel_de_write_notrace(struct intel_display *display, i915_reg_t reg,
+ u32 val)
{
- intel_uncore_write_notrace(&i915->uncore, reg, val);
+ intel_uncore_write_notrace(__to_uncore(display), reg, val);
}
+#define intel_de_write_notrace(p,...) __intel_de_write_notrace(__to_intel_display(p), __VA_ARGS__)
#endif /* __INTEL_DE_H__ */
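Note: two things happen to every accessor in intel_de.h. First, each one becomes a macro whose first argument goes through __to_intel_display(p), so existing call sites can keep passing struct drm_i915_private * while new code passes struct intel_display *. Second, each MMIO access is bracketed by intel_dmc_wl_get()/intel_dmc_wl_put(), taking a DMC wakelock reference for the touched register. A hypothetical sketch of the polymorphic shim, assuming a C11 _Generic selection and that the i915 struct embeds its display state as i915->display (the real definition is not in this diff):

static inline struct intel_display *
__i915_to_display(struct drm_i915_private *i915)
{
	return &i915->display;
}

static inline struct intel_display *
__display_to_display(struct intel_display *display)
{
	return display;
}

#define __to_intel_display(p)					\
	_Generic((p),						\
		 struct drm_i915_private *: __i915_to_display,	\
		 struct intel_display *: __display_to_display)(p)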
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index ab2f52d21bad..273323f30ae2 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -85,7 +85,6 @@
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
-#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
@@ -120,6 +119,7 @@
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
@@ -275,6 +275,13 @@ static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
return hweight8(crtc_state->bigjoiner_pipes);
}
+u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return BIT(crtc->pipe) | crtc_state->bigjoiner_pipes;
+}
+
struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
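Note: intel_crtc_joined_pipe_mask() above is the pivot of this series: BIT(crtc->pipe) | bigjoiner_pipes lets one loop cover the non-joined case (bigjoiner_pipes is 0, so the mask is just the crtc's own pipe) and the joined case (master plus slaves) alike. A standalone toy model of the iteration pattern used throughout the intel_ddi.c and hsw_crtc_enable() hunks:

#include <stdio.h>

#define BIT(n) (1u << (n))

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D, I915_MAX_PIPES };

int main(void)
{
	/* Hypothetical bigjoiner config: pipe A is the master, pipe B
	 * the slave; bigjoiner_pipes carries the full joined set. */
	enum pipe own_pipe = PIPE_A;
	unsigned int bigjoiner_pipes = BIT(PIPE_A) | BIT(PIPE_B);
	unsigned int joined = BIT(own_pipe) | bigjoiner_pipes;

	for (int p = PIPE_A; p < I915_MAX_PIPES; p++)
		if (joined & BIT(p))
			printf("sequence step runs on pipe %c\n", 'A' + p);
	return 0;
}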
@@ -383,8 +390,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
break;
}
- if (intel_de_wait_for_register(dev_priv, dpll_reg,
- port_mask, expected_mask, 1000))
+ if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000))
drm_WARN(&dev_priv->drm, 1,
"timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
@@ -430,6 +436,18 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
+ u32 set = 0;
+
+ if (DISPLAY_VER(dev_priv) == 14)
+ set |= DP_FEC_BS_JITTER_WA;
+
+ intel_de_rmw(dev_priv,
+ hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ clear, set);
+ }
+
val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
@@ -437,6 +455,14 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
return;
}
+ /* Wa_1409098942:adlp+ */
+ if (DISPLAY_VER(dev_priv) >= 13 &&
+ new_crtc_state->dsc.compression_enable) {
+ val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
+ val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
+ TRANSCONF_PIXEL_COUNT_SCALING_X4);
+ }
+
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
val | TRANSCONF_ENABLE);
intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
@@ -483,6 +509,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~TRANSCONF_ENABLE;
+ /* Wa_1409098942:adlp+ */
+ if (DISPLAY_VER(dev_priv) >= 13 &&
+ old_crtc_state->dsc.compression_enable)
+ val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
+
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
if (DISPLAY_VER(dev_priv) >= 12)
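Note: Wa_1409098942 pairs a field insert on enable with a mask clear on disable. A standalone sketch of the arithmetic REG_FIELD_PREP() performs (toy macros and a made-up field layout; the real TRANSCONF bit positions and the x4 encoding are assumptions):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_U32(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_PREP_U32(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t scaling_mask = GENMASK_U32(5, 4);	/* assumed field */
	uint32_t scaling_x4 = 2;			/* assumed encoding */
	uint32_t val = 0x80000000;			/* TRANSCONF_ENABLE-ish */

	/* Enable path: clear the field, then insert the x4 value. */
	val = (val & ~scaling_mask) | FIELD_PREP_U32(scaling_mask, scaling_x4);
	printf("enabled:  %#010x\n", (unsigned int)val);

	/* Disable path: clearing the field is enough. */
	val &= ~scaling_mask;
	printf("disabled: %#010x\n", (unsigned int)val);
	return 0;
}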
@@ -535,7 +566,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return DISPLAY_VER(dev_priv) < 4 ||
- (plane->fbc &&
+ (plane->fbc && !plane_state->no_fbc_reason &&
plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
@@ -1552,18 +1583,21 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool apply)
+/* Display WA #1180: WaDisableScalarClockGating: glk */
+static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
- u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
- u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (apply)
- val |= mask;
- else
- val &= ~mask;
+ return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
+}
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
+static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+
+ intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
+ mask, enable ? mask : 0);
}
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
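Note: glk_pipe_scaler_clock_gating_wa() collapses the old read/or/andnot/write into a single intel_de_rmw(i915, reg, mask, enable ? mask : 0). Assuming the usual clear-then-set semantics (third argument cleared first, fourth ORed in, old value returned), a toy model showing both calls reproduce the open-coded form:

#include <stdint.h>
#include <stdio.h>

/* Toy model of intel_de_rmw(): write (old & ~clear) | set, return old. */
static uint32_t rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t old = *reg;

	*reg = (old & ~clear) | set;
	return old;
}

int main(void)
{
	uint32_t mask = 0x7;	/* stands in for the *_GATING_DIS bits */
	uint32_t reg = 0x100;

	rmw(&reg, mask, mask);	/* enable == true: WA bits set */
	printf("apply:   %#x\n", (unsigned int)reg);
	rmw(&reg, mask, 0);	/* enable == false: WA bits cleared */
	printf("restore: %#x\n", (unsigned int)reg);
	return 0;
}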
@@ -1586,24 +1620,6 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
-static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
-
- /*
- * Enable sequence steps 1-7 on bigjoiner master
- */
- if (intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_encoders_pre_pll_enable(state, master_crtc);
-
- if (crtc_state->shared_dpll)
- intel_enable_shared_dpll(crtc_state);
-
- if (intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_encoders_pre_enable(state, master_crtc);
-}
-
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1639,90 +1655,107 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
- bool psl_clkgate_wa;
+ struct intel_crtc *pipe_crtc;
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
- intel_dmc_enable_pipe(dev_priv, crtc->pipe);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state))
+ intel_dmc_enable_pipe(dev_priv, pipe_crtc->pipe);
- if (!new_crtc_state->bigjoiner_pipes) {
- intel_encoders_pre_pll_enable(state, crtc);
+ intel_encoders_pre_pll_enable(state, crtc);
- if (new_crtc_state->shared_dpll)
- intel_enable_shared_dpll(new_crtc_state);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- intel_encoders_pre_enable(state, crtc);
- } else {
- icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
+ if (pipe_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(pipe_crtc_state);
}
- intel_dsc_enable(new_crtc_state);
+ intel_encoders_pre_enable(state, crtc);
- if (DISPLAY_VER(dev_priv) >= 13)
- intel_uncompressed_joiner_enable(new_crtc_state);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- intel_set_pipe_src_size(new_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipe_misc(new_crtc_state);
+ intel_dsc_enable(pipe_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ intel_uncompressed_joiner_enable(pipe_crtc_state);
+
+ intel_set_pipe_src_size(pipe_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipe_misc(pipe_crtc_state);
+ }
- if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
- !transcoder_is_dsi(cpu_transcoder))
+ if (!transcoder_is_dsi(cpu_transcoder))
hsw_configure_cpu_transcoder(new_crtc_state);
- crtc->active = true;
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- /* Display WA #1180: WaDisableScalarClockGating: glk */
- psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
- new_crtc_state->pch_pfit.enabled;
- if (psl_clkgate_wa)
- glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
+ pipe_crtc->active = true;
- if (DISPLAY_VER(dev_priv) >= 9)
- skl_pfit_enable(new_crtc_state);
- else
- ilk_pfit_enable(new_crtc_state);
+ if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
+ glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);
- /*
- * On ILK+ LUT must be loaded before the pipe is running but with
- * clocks enabled
- */
- intel_color_load_luts(new_crtc_state);
- intel_color_commit_noarm(new_crtc_state);
- intel_color_commit_arm(new_crtc_state);
- /* update DSPCNTR to configure gamma/csc for pipe bottom color */
- if (DISPLAY_VER(dev_priv) < 9)
- intel_disable_primary_plane(new_crtc_state);
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_pfit_enable(pipe_crtc_state);
+ else
+ ilk_pfit_enable(pipe_crtc_state);
- hsw_set_linetime_wm(new_crtc_state);
+ /*
+	 * On ILK+ the LUT must be loaded before the pipe is running, but
+	 * with clocks enabled
+ */
+ intel_color_load_luts(pipe_crtc_state);
+ intel_color_commit_noarm(pipe_crtc_state);
+ intel_color_commit_arm(pipe_crtc_state);
+ /* update DSPCNTR to configure gamma/csc for pipe bottom color */
+ if (DISPLAY_VER(dev_priv) < 9)
+ intel_disable_primary_plane(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 11)
- icl_set_pipe_chicken(new_crtc_state);
+ hsw_set_linetime_wm(pipe_crtc_state);
- intel_initial_watermarks(state, crtc);
+ if (DISPLAY_VER(dev_priv) >= 11)
+ icl_set_pipe_chicken(pipe_crtc_state);
- if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
- intel_crtc_vblank_on(new_crtc_state);
+ intel_initial_watermarks(state, pipe_crtc);
+ }
intel_encoders_enable(state, crtc);
- if (psl_clkgate_wa) {
- intel_crtc_wait_for_next_vblank(crtc);
- glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
- }
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+ enum pipe hsw_workaround_pipe;
- /* If we change the relative order between pipe/planes enabling, we need
- * to change the workaround. */
- hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
- if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
- struct intel_crtc *wa_crtc;
+ if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) {
+ intel_crtc_wait_for_next_vblank(pipe_crtc);
+ glk_pipe_scaler_clock_gating_wa(pipe_crtc, false);
+ }
- wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
+ /*
+ * If we change the relative order between pipe/planes
+ * enabling, we need to change the workaround.
+ */
+ hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
+ if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ struct intel_crtc *wa_crtc =
+ intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
- intel_crtc_wait_for_next_vblank(wa_crtc);
- intel_crtc_wait_for_next_vblank(wa_crtc);
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ }
}
}
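The open-coded psl_clkgate_wa condition removed above is factored into a helper so it can be evaluated per joined pipe. The helper's body sits outside this hunk; consistent with the removed lines, a sketch of it would be:

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
	{
		struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

		return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
	}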
@@ -1786,29 +1819,28 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc *pipe_crtc;
/*
* FIXME collapse everything to one hook.
* Need care with mst->ddi interactions.
*/
- if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
- intel_encoders_disable(state, crtc);
- intel_encoders_post_disable(state, crtc);
- }
-
- intel_disable_shared_dpll(old_crtc_state);
+ intel_encoders_disable(state, crtc);
+ intel_encoders_post_disable(state, crtc);
- if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
- struct intel_crtc *slave_crtc;
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
- intel_encoders_post_pll_disable(state, crtc);
+ intel_disable_shared_dpll(old_pipe_crtc_state);
+ }
- intel_dmc_disable_pipe(i915, crtc->pipe);
+ intel_encoders_post_pll_disable(state, crtc);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(old_crtc_state))
- intel_dmc_disable_pipe(i915, slave_crtc->pipe);
- }
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state))
+ intel_dmc_disable_pipe(i915, pipe_crtc->pipe);
}
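Both the enable and disable paths now walk a single joined pipe set instead of special-casing bigjoiner slaves. intel_crtc_joined_pipe_mask() is only declared in this series (see intel_display.h below); judging by the crtc->pipe plus intel_crtc_bigjoiner_slave_pipes() logic it replaces, it presumably combines the crtc's own pipe with its slave pipes, roughly:

	u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
	{
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

		return BIT(crtc->pipe) | intel_crtc_bigjoiner_slave_pipes(crtc_state);
	}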
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -1836,6 +1868,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
+/* Prefer intel_encoder_is_combo() */
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
@@ -1857,6 +1890,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
return false;
}
+/* Prefer intel_encoder_is_tc() */
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
/*
@@ -1877,6 +1911,7 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
return false;
}
+/* Prefer intel_encoder_is_snps() */
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
/*
@@ -1886,6 +1921,7 @@ bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
}
+/* Prefer intel_encoder_to_phy() */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
@@ -1903,6 +1939,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
return PHY_A + port - PORT_A;
}
+/* Prefer intel_encoder_to_tc() */
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
@@ -1914,6 +1951,41 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
return TC_PORT_1 + port - PORT_C;
}
+enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_port_to_phy(i915, encoder->port);
+}
+
+bool intel_encoder_is_combo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder));
+}
+
+bool intel_encoder_is_snps(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder));
+}
+
+bool intel_encoder_is_tc(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
+}
+
+enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_port_to_tc(i915, encoder->port);
+}
+
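Together with the "Prefer ..." hints added above, these wrappers let callers drop the explicit i915/port plumbing. A hypothetical call site before and after the conversion:

	/* before: caller needs both the device and the port */
	if (intel_phy_is_tc(i915, intel_port_to_phy(i915, encoder->port)))
		tc_port = intel_port_to_tc(i915, encoder->port);

	/* after: everything is derived from the encoder itself */
	if (intel_encoder_is_tc(encoder))
		tc_port = intel_encoder_to_tc(encoder);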
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
@@ -2381,7 +2453,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int clock_limit = i915->max_dotclk_freq;
+ int clock_limit = i915->display.cdclk.max_dotclk_freq;
/*
* Start with the adjusted_mode crtc timings, which
@@ -2405,7 +2477,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
*/
if (intel_crtc_supports_double_wide(crtc) &&
pipe_mode->crtc_clock > clock_limit) {
- clock_limit = i915->max_dotclk_freq;
+ clock_limit = i915->display.cdclk.max_dotclk_freq;
crtc_state->double_wide = true;
}
}
@@ -2709,15 +2781,6 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
*/
intel_de_write(dev_priv, PIPESRC(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
-
- if (!crtc_state->enable_psr2_su_region_et)
- return;
-
- width = drm_rect_width(&crtc_state->psr2_su_area);
- height = drm_rect_height(&crtc_state->psr2_su_area);
-
- intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
- PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -3008,19 +3071,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pfit_config(pipe_config);
+ i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state);
+
if (DISPLAY_VER(dev_priv) >= 4) {
- /* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
- else
- tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
+ tmp = pipe_config->dpll_hw_state.i9xx.dpll_md;
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
- pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
+ tmp = pipe_config->dpll_hw_state.i9xx.dpll;
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
>> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
@@ -3030,26 +3090,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* function. */
pipe_config->pixel_multiplier = 1;
}
- pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
- DPLL(crtc->pipe));
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
- FP0(crtc->pipe));
- pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
- FP1(crtc->pipe));
- } else {
- /* Mask out read-only status bits. */
- pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
- DPLL_PORTC_READY_MASK |
- DPLL_PORTB_READY_MASK);
- }
if (IS_CHERRYVIEW(dev_priv))
- chv_crtc_clock_get(crtc, pipe_config);
+ chv_crtc_clock_get(pipe_config);
else if (IS_VALLEYVIEW(dev_priv))
- vlv_crtc_clock_get(crtc, pipe_config);
+ vlv_crtc_clock_get(pipe_config);
else
- i9xx_crtc_clock_get(crtc, pipe_config);
+ i9xx_crtc_clock_get(pipe_config);
/*
* Normally the dotclock is filled in by the encoder .get_config()
@@ -3675,8 +3722,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
struct intel_display_power_domain_set *power_domain_set)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder;
enum port port;
u32 tmp;
@@ -3702,11 +3749,11 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
break;
/* XXX: this works for video mode only */
- tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
+ tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port));
if (!(tmp & DPI_ENABLE))
continue;
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp = intel_de_read(display, MIPI_CTRL(display, port));
if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
continue;
@@ -4723,8 +4770,6 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state,
struct drm_connector *connector;
int i;
- intel_bigjoiner_adjust_pipe_src(crtc_state);
-
for_each_new_connector_in_state(&state->base, connector,
conn_state, i) {
struct intel_encoder *encoder =
@@ -4792,42 +4837,92 @@ intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
}
static bool
+intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a,
+ const struct drm_dp_as_sdp *b)
+{
+ return a->vtotal == b->vtotal &&
+ a->target_rr == b->target_rr &&
+ a->duration_incr_ms == b->duration_incr_ms &&
+ a->duration_decr_ms == b->duration_decr_ms &&
+ a->mode == b->mode;
+}
+
+static bool
intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
{
return memcmp(a, b, len) == 0;
}
+static void __printf(5, 6)
+pipe_config_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name, const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ if (fastset)
+ drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
+ else
+ drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
+
+ va_end(args);
+}
+
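The consolidated pipe_config_mismatch() takes a drm_printer plus a trailing format string and forwards the latter through the kernel's %pV specifier, so one helper serves both the fastset (debug) and modeset (error) paths. A minimal standalone illustration of the %pV pattern, with hypothetical names:

	static void __printf(2, 3)
	report(struct drm_printer *p, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* %pV expands the wrapped format/args in place */
		drm_printf(p, "mismatch: %pV\n", &vaf);
		va_end(args);
	}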
static void
-pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
- bool fastset, const char *name,
+pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
const union hdmi_infoframe *a,
const union hdmi_infoframe *b)
{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const char *loglevel;
+
if (fastset) {
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- drm_dbg_kms(&dev_priv->drm,
- "fastset requirement not met in %s infoframe\n", name);
- drm_dbg_kms(&dev_priv->drm, "expected:\n");
- hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- drm_dbg_kms(&dev_priv->drm, "found:\n");
- hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ loglevel = KERN_DEBUG;
} else {
- drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
- drm_err(&dev_priv->drm, "expected:\n");
- hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- drm_err(&dev_priv->drm, "found:\n");
- hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+ loglevel = KERN_ERR;
}
+
+ pipe_config_mismatch(p, fastset, crtc, name, "infoframe");
+
+ drm_printf(p, "expected:\n");
+ hdmi_infoframe_log(loglevel, i915->drm.dev, a);
+ drm_printf(p, "found:\n");
+ hdmi_infoframe_log(loglevel, i915->drm.dev, b);
}
static void
-pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915,
- bool fastset, const char *name,
+pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
+ pipe_config_mismatch(p, fastset, crtc, name, "dp sdp");
+
+ drm_printf(p, "expected:\n");
+ drm_dp_vsc_sdp_log(p, a);
+ drm_printf(p, "found:\n");
+ drm_dp_vsc_sdp_log(p, b);
+}
+
+static void
+pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915,
+ bool fastset, const char *name,
+ const struct drm_dp_as_sdp *a,
+ const struct drm_dp_as_sdp *b)
+{
struct drm_printer p;
if (fastset) {
@@ -4841,9 +4936,9 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915,
}
drm_printf(&p, "expected:\n");
- drm_dp_vsc_sdp_log(&p, a);
+ drm_dp_as_sdp_log(&p, a);
drm_printf(&p, "found:\n");
- drm_dp_vsc_sdp_log(&p, b);
+ drm_dp_as_sdp_log(&p, b);
}
/* Returns the length up to and including the last differing byte */
@@ -4861,64 +4956,35 @@ memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
}
static void
-pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc,
+pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
const char *name,
const u8 *a, const u8 *b, size_t len)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const char *loglevel;
if (fastset) {
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- /* only dump up to the last difference */
- len = memcmp_diff_len(a, b, len);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] fastset requirement not met in %s buffer\n",
- crtc->base.base.id, crtc->base.name, name);
- print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
- 16, 0, a, len, false);
- print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
- 16, 0, b, len, false);
+ loglevel = KERN_DEBUG;
} else {
- /* only dump up to the last difference */
- len = memcmp_diff_len(a, b, len);
-
- drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
- crtc->base.base.id, crtc->base.name, name);
- print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
- 16, 0, a, len, false);
- print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
- 16, 0, b, len, false);
+ loglevel = KERN_ERR;
}
-}
-static void __printf(4, 5)
-pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
- const char *name, const char *format, ...)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct va_format vaf;
- va_list args;
-
- va_start(args, format);
- vaf.fmt = format;
- vaf.va = &args;
+ pipe_config_mismatch(p, fastset, crtc, name, "buffer");
- if (fastset)
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
- else
- drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
- va_end(args);
+ print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE,
+ 16, 0, a, len, false);
+ print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE,
+ 16, 0, b, len, false);
}
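memcmp_diff_len() itself is untouched here; going by its comment above ("length up to and including the last differing byte"), it is presumably along these lines:

	static size_t memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
	{
		while (len > 0 && a[len - 1] == b[len - 1])
			len--;

		return len;
	}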
static void
-pipe_config_pll_mismatch(bool fastset,
+pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
const struct intel_crtc *crtc,
const char *name,
const struct intel_dpll_hw_state *a,
@@ -4926,25 +4992,12 @@ pipe_config_pll_mismatch(bool fastset,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (fastset) {
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
+ pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] fastset requirement not met in %s\n",
- crtc->base.base.id, crtc->base.name, name);
- drm_dbg_kms(&i915->drm, "expected:\n");
- intel_dpll_dump_hw_state(i915, a);
- drm_dbg_kms(&i915->drm, "found:\n");
- intel_dpll_dump_hw_state(i915, b);
- } else {
- drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
- crtc->base.base.id, crtc->base.name, name);
- drm_err(&i915->drm, "expected:\n");
- intel_dpll_dump_hw_state(i915, a);
- drm_err(&i915->drm, "found:\n");
- intel_dpll_dump_hw_state(i915, b);
- }
+ drm_printf(p, "expected:\n");
+ intel_dpll_dump_hw_state(i915, p, a);
+ drm_printf(p, "found:\n");
+ intel_dpll_dump_hw_state(i915, p, b);
}
bool
@@ -4954,13 +5007,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
{
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_printer p;
bool ret = true;
+ if (fastset)
+ p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
+ else
+ p = drm_err_printer(&dev_priv->drm, NULL);
+
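Selecting the printer once up front replaces the per-helper fastset branching deleted above: every mismatch helper now simply writes to the printer and inherits the right severity, with drm_dbg_printer() output still gated on the DRM_UT_KMS debug category. The explicit drm_debug_enabled() early returns survive only in the infoframe and buffer helpers, because hdmi_infoframe_log() and print_hex_dump() take a raw loglevel and bypass the printer entirely.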
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
__stringify(name) " is bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name, \
pipe_config->name); \
@@ -4972,7 +5031,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
__stringify(name) " is bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -4984,7 +5043,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
__stringify(name) " is bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
@@ -4996,7 +5055,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
__stringify(name) " is not bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %s, found %s)", \
str_yes_no(current_config->name), \
str_yes_no(pipe_config->name)); \
@@ -5006,7 +5065,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %p, found %p)", \
current_config->name, \
pipe_config->name); \
@@ -5017,7 +5076,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_M_N(name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected tu %i data %i/%i link %i/%i, " \
"found tu %i, data %i/%i link %i/%i)", \
current_config->name.tu, \
@@ -5037,7 +5096,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_PLL(name) do { \
if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
&pipe_config->name)) { \
- pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->name, \
&pipe_config->name); \
ret = false; \
@@ -5070,7 +5129,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(%x) (expected %i, found %i)", \
(mask), \
current_config->name & (mask), \
@@ -5082,7 +5141,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
if (!intel_compare_infoframe(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
@@ -5092,7 +5151,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
+ if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
@@ -5103,7 +5172,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
- pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \
current_config->name, \
pipe_config->name, \
(len)); \
@@ -5116,7 +5185,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
!intel_color_lut_equal(current_config, \
current_config->lut, pipe_config->lut, \
is_pre_csc_lut)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(lut), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \
"hw_state doesn't match sw_state"); \
ret = false; \
} \
@@ -5245,6 +5314,18 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_CSC(output_csc);
}
+ /*
+	 * Panel replay has to be enabled before link training. PSR doesn't have
+	 * this requirement, so check these only when using panel replay.
+ */
+ if (current_config->has_panel_replay || pipe_config->has_panel_replay) {
+ PIPE_CONF_CHECK_BOOL(has_psr);
+ PIPE_CONF_CHECK_BOOL(has_psr2);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et);
+ PIPE_CONF_CHECK_BOOL(has_panel_replay);
+ }
+
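For reference, each of these lines expands through the PIPE_CONF_CHECK_BOOL() macro shown earlier (plus its compile-time type assertion), roughly to:

	if (current_config->has_panel_replay != pipe_config->has_panel_replay) {
		pipe_config_mismatch(&p, fastset, crtc, "has_panel_replay",
				     "(expected %s, found %s)",
				     str_yes_no(current_config->has_panel_replay),
				     str_yes_no(pipe_config->has_panel_replay));
		ret = false;
	}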
PIPE_CONF_CHECK_BOOL(double_wide);
if (dev_priv->display.dpll.mgr)
@@ -5280,6 +5361,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
+ PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
@@ -5331,6 +5413,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.flipline);
PIPE_CONF_CHECK_I(vrr.pipeline_full);
PIPE_CONF_CHECK_I(vrr.guardband);
+ PIPE_CONF_CHECK_I(vrr.vsync_start);
+ PIPE_CONF_CHECK_I(vrr.vsync_end);
}
#undef PIPE_CONF_CHECK_X
@@ -5576,14 +5660,16 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* only allow LRR when the timings stay within the VRR range */
if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
new_crtc_state->update_lrr = false;
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
- drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n");
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
+ crtc->base.base.id, crtc->base.name);
else
new_crtc_state->uapi.mode_changed = false;
@@ -6237,27 +6323,37 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
continue;
}
- if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
- drm_WARN_ON(&i915->drm, new_crtc_state->uapi.enable);
+ if (drm_WARN_ON(&i915->drm, intel_crtc_is_bigjoiner_slave(new_crtc_state)))
continue;
- }
ret = intel_crtc_prepare_cleared_state(state, crtc);
if (ret)
- break;
+ goto fail;
if (!new_crtc_state->hw.enable)
continue;
ret = intel_modeset_pipe_config(state, crtc, limits);
if (ret)
- break;
+ goto fail;
+ }
- ret = intel_atomic_check_bigjoiner(state, crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (drm_WARN_ON(&i915->drm, intel_crtc_is_bigjoiner_slave(new_crtc_state)))
+ continue;
+
+ if (!new_crtc_state->hw.enable)
+ continue;
+
+ ret = intel_modeset_pipe_config_late(state, crtc);
if (ret)
- break;
+ goto fail;
}
+fail:
if (ret)
*failed_pipe = crtc->pipe;
@@ -6353,16 +6449,26 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
+ drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
+ continue;
+ }
+
+ ret = intel_atomic_check_bigjoiner(state, crtc);
+ if (ret)
+ goto fail;
+ }
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- if (new_crtc_state->hw.enable) {
- ret = intel_modeset_pipe_config_late(state, crtc);
- if (ret)
- goto fail;
- }
+ intel_bigjoiner_adjust_pipe_src(new_crtc_state);
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
}
@@ -6644,17 +6750,21 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc *pipe_crtc;
if (!intel_crtc_needs_modeset(new_crtc_state))
return;
- /* VRR will be enable later, if required */
- intel_crtc_update_active_timings(new_crtc_state, false);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- dev_priv->display.funcs.display->crtc_enable(state, crtc);
+		/* VRR will be enabled later, if required */
+ intel_crtc_update_active_timings(pipe_crtc_state, false);
+ }
- if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
- return;
+ dev_priv->display.funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
@@ -6746,31 +6856,42 @@ static void intel_update_crtc(struct intel_atomic_state *state,
}
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc *pipe_crtc;
/*
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
*/
- intel_crtc_disable_pipe_crc(crtc);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state))
+ intel_crtc_disable_pipe_crc(pipe_crtc);
dev_priv->display.funcs.display->crtc_disable(state, crtc);
- crtc->active = false;
- intel_fbc_disable(crtc);
- if (!new_crtc_state->hw.active)
- intel_initial_watermarks(state, crtc);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *new_pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ pipe_crtc->active = false;
+ intel_fbc_disable(pipe_crtc);
+
+ if (!new_pipe_crtc_state->hw.active)
+ intel_initial_watermarks(state, pipe_crtc);
+ }
}
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
- struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
- u32 handled = 0;
+ u8 disable_pipes = 0;
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -6778,21 +6899,31 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
+ /*
+ * Needs to be done even for pipes
+ * that weren't enabled previously.
+ */
intel_pre_plane_update(state, crtc);
if (!old_crtc_state->hw.active)
continue;
+ disable_pipes |= BIT(crtc->pipe);
+ }
+
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if ((disable_pipes & BIT(crtc->pipe)) == 0)
+ continue;
+
intel_crtc_disable_planes(state, crtc);
}
/* Only disable port sync and MST slaves */
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state))
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if ((disable_pipes & BIT(crtc->pipe)) == 0)
continue;
- if (!old_crtc_state->hw.active)
+ if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
continue;
/* In case of Transcoder port Sync master slave CRTCs can be
@@ -6801,28 +6932,28 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
* Slave vblanks are masked till Master Vblanks.
*/
if (!is_trans_port_sync_slave(old_crtc_state) &&
- !intel_dp_mst_is_slave_trans(old_crtc_state) &&
- !intel_crtc_is_bigjoiner_slave(old_crtc_state))
+ !intel_dp_mst_is_slave_trans(old_crtc_state))
continue;
- intel_old_crtc_state_disables(state, old_crtc_state,
- new_crtc_state, crtc);
- handled |= BIT(crtc->pipe);
+ intel_old_crtc_state_disables(state, crtc);
+
+ disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
}
/* Disable everything else left on */
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state) ||
- (handled & BIT(crtc->pipe)))
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if ((disable_pipes & BIT(crtc->pipe)) == 0)
continue;
- if (!old_crtc_state->hw.active)
+ if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
continue;
- intel_old_crtc_state_disables(state, old_crtc_state,
- new_crtc_state, crtc);
+ intel_old_crtc_state_disables(state, crtc);
+
+ disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
}
+
+ drm_WARN_ON(&i915->drm, disable_pipes);
}
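The bookkeeping works in passes: the first loop collects every previously-active modeset pipe into disable_pipes, and each subsequent intel_old_crtc_state_disables() call clears that crtc's entire joined pipe set in one step, so a bigjoiner slave is never torn down on its own. As a worked example, if pipe A masters pipe B (a hypothetical configuration), disable_pipes starts as BIT(A) | BIT(B); disabling the master clears both bits at once, and the final drm_WARN_ON() catches any pipe that slipped through both passes.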
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
@@ -6889,9 +7020,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_pre_update_crtc(state, crtc);
}
+ intel_dbuf_mbus_pre_ddb_update(state);
+
while (update_pipes) {
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ /*
+		 * Commit in reverse order to make the bigjoiner master
+		 * send the uapi events after the slaves are done.
+ */
+ for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
if ((update_pipes & BIT(pipe)) == 0)
@@ -6919,6 +7056,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
}
}
+ intel_dbuf_mbus_post_ddb_update(state);
+
update_pipes = modeset_pipes;
/*
@@ -6931,12 +7070,14 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ continue;
+
if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
- is_trans_port_sync_master(new_crtc_state) ||
- intel_crtc_is_bigjoiner_master(new_crtc_state))
+ is_trans_port_sync_master(new_crtc_state))
continue;
- modeset_pipes &= ~BIT(pipe);
+ modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
intel_enable_crtc(state, crtc);
}
@@ -6951,7 +7092,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
- modeset_pipes &= ~BIT(pipe);
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ continue;
+
+ modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
intel_enable_crtc(state, crtc);
}
@@ -6968,7 +7112,11 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_pre_update_crtc(state, crtc);
}
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ /*
+	 * Commit in reverse order to make the bigjoiner master
+	 * send the uapi events after the slaves are done.
+ */
+ for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
if ((update_pipes & BIT(pipe)) == 0)
@@ -7165,7 +7313,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
intel_dbuf_pre_plane_update(state);
- intel_mbus_dbox_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->do_async_flip)
@@ -7690,7 +7837,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
static int max_dotclock(struct drm_i915_private *i915)
{
- int max_dotclock = i915->max_dotclk_freq;
+ int max_dotclock = i915->display.cdclk.max_dotclk_freq;
/* icl+ might use bigjoiner */
if (DISPLAY_VER(i915) >= 11)
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f4a0773f0fca..56d1c0e3e62c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -280,6 +280,12 @@ enum phy_fia {
base.head) \
for_each_if((pipe_mask) & BIT(intel_crtc->pipe))
+#define for_each_intel_crtc_in_pipe_mask_reverse(dev, intel_crtc, pipe_mask) \
+ list_for_each_entry_reverse((intel_crtc), \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if((pipe_mask) & BIT((intel_crtc)->pipe))
+
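A usage sketch with hypothetical surroundings, assuming the crtc list is ordered by pipe (as it is on i915) so the reverse walk visits higher pipes first:

	struct intel_crtc *tmp;

	for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, tmp,
						 intel_crtc_joined_pipe_mask(crtc_state))
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s]\n",
			    tmp->base.base.id, tmp->base.name);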
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
&(dev)->mode_config.encoder_list, \
@@ -344,6 +350,14 @@ enum phy_fia {
(__i)++) \
for_each_if(crtc)
+#define for_each_new_intel_crtc_in_state_reverse(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
+ (__i) >= 0 && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)--) \
+ for_each_if(crtc)
+
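Unlike the list-based iterator above, this one walks the atomic state's crtc array by index, counting down from num_crtc - 1; skl_commit_modeset_enables() relies on it so that bigjoiner masters commit after their slaves and send the uapi events last.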
#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -408,6 +422,7 @@ intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
bool is_trans_port_sync_master(const struct intel_crtc_state *state);
+u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state);
u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state);
@@ -448,6 +463,13 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
+
+enum phy intel_encoder_to_phy(struct intel_encoder *encoder);
+bool intel_encoder_is_combo(struct intel_encoder *encoder);
+bool intel_encoder_is_snps(struct intel_encoder *encoder);
+bool intel_encoder_is_tc(struct intel_encoder *encoder);
+enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder);
+
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
new file mode 100644
index 000000000000..ad8545c8055d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+/*
+ * This header is for transitional struct intel_display conversion helpers only.
+ */
+
+#ifndef __INTEL_DISPLAY_CONVERSION__
+#define __INTEL_DISPLAY_CONVERSION__
+
+/*
+ * Transitional macro to optionally convert struct drm_i915_private * to struct
+ * intel_display *, also accepting the latter.
+ */
+#define __to_intel_display(p) \
+ _Generic(p, \
+ const struct drm_i915_private *: (&((const struct drm_i915_private *)(p))->display), \
+ struct drm_i915_private *: (&((struct drm_i915_private *)(p))->display), \
+ const struct intel_display *: (p), \
+ struct intel_display *: (p))
+
+#endif /* __INTEL_DISPLAY_CONVERSION__ */
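A sketch of what the _Generic dispatch buys at a call site: macros built on it, such as the DISPLAY_INFO()/DISPLAY_RUNTIME_INFO() redefinitions below, accept either pointer type during the transition at no runtime cost. A hypothetical helper:

	static bool has_dsb_either(struct drm_i915_private *i915)
	{
		struct intel_display *display = &i915->display;

		/* __to_intel_display() dispatches on the argument's static type */
		return HAS_DSB(i915) && HAS_DSB(display);
	}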
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 2167dbee5eea..7715fc329057 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -26,6 +26,7 @@
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
+#include "intel_dmc_wl.h"
#include "intel_wm_types.h"
struct task_struct;
@@ -282,6 +283,9 @@ struct intel_wm {
};
struct intel_display {
+ /* drm device backpointer */
+ struct drm_device *drm;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -345,6 +349,8 @@ struct intel_display {
struct intel_global_obj obj;
unsigned int max_cdclk_freq;
+ unsigned int max_dotclk_freq;
+ unsigned int skl_preferred_vco_freq;
} cdclk;
struct {
@@ -446,6 +452,16 @@ struct intel_display {
} ips;
struct {
+ bool display_irqs_enabled;
+
+ /* For i915gm/i945gm vblank irq workaround */
+ u8 vblank_enabled;
+
+ u32 de_irq_mask[I915_MAX_PIPES];
+ u32 pipestat_irq_mask[I915_MAX_PIPES];
+ } irq;
+
+ struct {
wait_queue_head_t waitqueue;
/* mutex to protect pmdemand programming sequence */
@@ -534,6 +550,7 @@ struct intel_display {
struct intel_overlay *overlay;
struct intel_display_params params;
struct intel_vbt_data vbt;
+ struct intel_dmc_wl wl;
struct intel_wm wm;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index b99c024b0934..35f9f86ef70f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -31,6 +31,7 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_wm.h"
@@ -191,7 +192,7 @@ static void intel_hdcp_info(struct seq_file *m,
struct intel_connector *intel_connector,
bool remote_req)
{
- bool hdcp_cap, hdcp2_cap;
+ bool hdcp_cap = false, hdcp2_cap = false;
if (!intel_connector->hdcp.shim) {
seq_puts(m, "No Connector Support");
@@ -252,9 +253,6 @@ static void intel_connector_info(struct seq_file *m,
struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_connector_state *conn_state = connector->state;
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
const struct drm_display_mode *mode;
seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
@@ -271,28 +269,23 @@ static void intel_connector_info(struct seq_file *m,
drm_get_subpixel_order_name(connector->display_info.subpixel_order));
seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
- if (!encoder)
- return;
-
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- if (encoder->type == INTEL_OUTPUT_DP_MST)
+ if (intel_connector->mst_port)
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_HDMIA:
- if (encoder->type == INTEL_OUTPUT_HDMI ||
- encoder->type == INTEL_OUTPUT_DDI)
- intel_hdmi_info(m, intel_connector);
+ intel_hdmi_info(m, intel_connector);
break;
default:
break;
}
seq_puts(m, "\tHDCP version: ");
- if (intel_encoder_is_mst(encoder)) {
+ if (intel_connector->mst_port) {
intel_hdcp_info(m, intel_connector, true);
seq_puts(m, "\tMST Hub HDCP version: ");
}
@@ -645,51 +638,24 @@ static int i915_display_capabilities(struct seq_file *m, void *unused)
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
struct intel_shared_dpll *pll;
int i;
drm_modeset_lock_all(&dev_priv->drm);
- seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+ drm_printf(&p, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
dev_priv->display.dpll.ref_clks.nssc,
dev_priv->display.dpll.ref_clks.ssc);
for_each_shared_dpll(dev_priv, pll, i) {
- seq_printf(m, "DPLL%i: %s, id: %i\n", pll->index,
+ drm_printf(&p, "DPLL%i: %s, id: %i\n", pll->index,
pll->info->name, pll->info->id);
- seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
+ drm_printf(&p, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
pll->state.pipe_mask, pll->active_mask,
str_yes_no(pll->on));
- seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
- seq_printf(m, " dpll_md: 0x%08x\n",
- pll->state.hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
- seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
- seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
- seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0);
- seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
- pll->state.hw_state.mg_refclkin_ctl);
- seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_coreclkctl1);
- seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_hsclkctl);
- seq_printf(m, " mg_pll_div0: 0x%08x\n",
- pll->state.hw_state.mg_pll_div0);
- seq_printf(m, " mg_pll_div1: 0x%08x\n",
- pll->state.hw_state.mg_pll_div1);
- seq_printf(m, " mg_pll_lf: 0x%08x\n",
- pll->state.hw_state.mg_pll_lf);
- seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
- pll->state.hw_state.mg_pll_frac_lock);
- seq_printf(m, " mg_pll_ssc: 0x%08x\n",
- pll->state.hw_state.mg_pll_ssc);
- seq_printf(m, " mg_pll_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_bias);
- seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_tdc_coldst_bias);
+ drm_printf(&p, " tracked hardware state:\n");
+ intel_dpll_dump_hw_state(dev_priv, &p, &pll->state.hw_state);
}
drm_modeset_unlock_all(&dev_priv->drm);
@@ -1103,27 +1069,6 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_display_debugfs_params(i915);
}
-static int i915_panel_show(struct seq_file *m, void *data)
-{
- struct intel_connector *connector = m->private;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
-
- if (connector->base.status != connector_status_connected)
- return -ENODEV;
-
- seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->pps.panel_power_up_delay);
- seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->pps.panel_power_down_delay);
- seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->pps.backlight_on_delay);
- seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->pps.backlight_off_delay);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_panel);
-
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
@@ -1402,20 +1347,6 @@ out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
-static int i915_bigjoiner_enable_show(struct seq_file *m, void *data)
-{
- struct intel_connector *connector = m->private;
- struct drm_crtc *crtc;
-
- crtc = connector->base.state->crtc;
- if (connector->base.status != connector_status_connected || !crtc)
- return -ENODEV;
-
- seq_printf(m, "Bigjoiner enable: %d\n", connector->force_bigjoiner_enable);
-
- return 0;
-}
-
static ssize_t i915_dsc_output_format_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
@@ -1437,30 +1368,6 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
return len;
}
-static ssize_t i915_bigjoiner_enable_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct intel_connector *connector = m->private;
- struct drm_crtc *crtc;
- bool bigjoiner_en = 0;
- int ret;
-
- crtc = connector->base.state->crtc;
- if (connector->base.status != connector_status_connected || !crtc)
- return -ENODEV;
-
- ret = kstrtobool_from_user(ubuf, len, &bigjoiner_en);
- if (ret < 0)
- return ret;
-
- connector->force_bigjoiner_enable = bigjoiner_en;
- *offp += len;
-
- return len;
-}
-
static int i915_dsc_output_format_open(struct inode *inode,
struct file *file)
{
@@ -1554,8 +1461,6 @@ static const struct file_operations i915_dsc_fractional_bpp_fops = {
.write = i915_dsc_fractional_bpp_write
};
-DEFINE_SHOW_STORE_ATTRIBUTE(i915_bigjoiner_enable);
-
/*
* Returns the Current CRTC's bpc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@@ -1608,12 +1513,9 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
return;
intel_drrs_connector_debugfs_add(connector);
+ intel_pps_connector_debugfs_add(connector);
intel_psr_connector_debugfs_add(connector);
- if (connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_panel_timings", 0444, root,
- connector, &i915_panel_fops);
-
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_HDMIB) {
@@ -1640,8 +1542,8 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
if (DISPLAY_VER(i915) >= 11 &&
(connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP)) {
- debugfs_create_file("i915_bigjoiner_force_enable", 0644, root,
- connector, &i915_bigjoiner_enable_fops);
+ debugfs_create_bool("i915_bigjoiner_force_enable", 0644, root,
+ &connector->force_bigjoiner_enable);
}
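The bespoke show/store fops removed above are replaced by debugfs_create_bool(), which binds the file directly to connector->force_bigjoiner_enable; the generic helper handles all parsing and formatting, though note it does not repeat the connector-status check the old write handler performed.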
if (connector_type == DRM_MODE_CONNECTOR_DSI ||
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index c02d79b50006..120e209ee74a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -17,6 +17,9 @@
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for display info");
+
static const struct intel_display_device_info no_display = {};
#define PIPE_A_OFFSET 0x70000
@@ -768,6 +771,8 @@ static const struct intel_display_device_info xe2_lpd_display = {
BIT(INTEL_FBC_C) | BIT(INTEL_FBC_D),
};
+__diag_pop();
+
/*
* Separate detection for no display cases to keep the display id array simple.
*
@@ -922,6 +927,9 @@ void intel_display_device_probe(struct drm_i915_private *i915)
const struct intel_display_device_info *info;
u16 ver, rel, step;
+ /* Add drm device backpointer as early as possible. */
+ i915->display.drm = &i915->drm;
+
if (HAS_GMD_ID(i915))
info = probe_gmdid_display(i915, &ver, &rel, &step);
else
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index fe4268813786..17ddf82f0b6e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+#include "intel_display_conversion.h"
#include "intel_display_limits.h"
struct drm_i915_private;
@@ -47,6 +48,7 @@ struct drm_printer;
#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
@@ -68,6 +70,7 @@ struct drm_printer;
#define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \
BIT(trans)) != 0)
#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
+#define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13)
#define INTEL_NUM_PIPES(i915) (hweight8(DISPLAY_RUNTIME_INFO(i915)->pipe_mask))
#define I915_HAS_HOTPLUG(i915) (DISPLAY_INFO(i915)->has_hotplug)
#define OVERLAY_NEEDS_PHYSICAL(i915) (DISPLAY_INFO(i915)->overlay_needs_physical)
@@ -98,8 +101,8 @@ struct drm_printer;
(IS_DISPLAY_IP_RANGE((__i915), (ipver), (ipver)) && \
IS_DISPLAY_STEP((__i915), (from), (until)))
-#define DISPLAY_INFO(i915) ((i915)->display.info.__device_info)
-#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->display.info.__runtime_info)
+#define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(i915) (&__to_intel_display(i915)->info.__runtime_info)
#define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver)
#define DISPLAY_VER_FULL(i915) IP_VER(DISPLAY_RUNTIME_INFO(i915)->ip.ver, \
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 87dd07e0d138..89bd032ed995 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -11,6 +11,7 @@
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
@@ -98,7 +99,6 @@ void intel_display_driver_init_hw(struct drm_i915_private *i915)
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.get_format_info = intel_fb_get_format_info,
- .output_poll_changed = intel_fbdev_output_poll_changed,
.mode_valid = intel_mode_valid,
.atomic_check = intel_atomic_check,
.atomic_commit = intel_atomic_commit,
@@ -198,11 +198,13 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
intel_dpll_init_clock_hook(i915);
intel_init_display_hooks(i915);
intel_fdi_init_hook(i915);
+ intel_dmc_wl_init(&i915->display);
}
/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
int ret;
if (i915_inject_probe_failure(i915))
@@ -261,7 +263,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- intel_init_quirks(i915);
+ intel_init_quirks(display);
intel_fbc_init(i915);
@@ -514,10 +516,6 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
intel_overlay_setup(i915);
- ret = intel_fbdev_init(&i915->drm);
- if (ret)
- return ret;
-
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
@@ -545,16 +543,6 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_display_debugfs_register(i915);
/*
- * Some ports require correctly set-up hpd registers for
- * detection to work properly (leading to ghost connected
- * connector status), e.g. VGA on gm45. Hence we can only set
- * up the initial fbdev config after hpd irqs are fully
- * enabled. We do it last so that the async config cannot run
- * before the connectors are registered.
- */
- intel_fbdev_initial_config_async(i915);
-
- /*
* We need to coordinate the hotplugs with the asynchronous
* fbdev configuration, for which we use the
* fbdev->async_cookie.
@@ -562,6 +550,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
drm_kms_helper_poll_init(&i915->drm);
intel_hpd_poll_disable(i915);
+ intel_fbdev_setup(i915);
+
intel_display_device_info_print(DISPLAY_INFO(i915),
DISPLAY_RUNTIME_INFO(i915), &p);
}
@@ -597,9 +587,6 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
*/
intel_hpd_poll_fini(i915);
- /* poll work can call into fbdev, hence clean that up afterwards */
- intel_fbdev_fini(i915);
-
intel_unregister_dsm_handler();
/* flush any delayed tasks or pending work */
@@ -638,7 +625,8 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- intel_fbdev_unregister(i915);
+ drm_client_dev_unregister(&i915->drm);
+
/*
* After flushing the fbdev (incl. a late async config which
* will have delayed queuing of a hotplug event), then flush
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index f846c5b108b5..c337e0597541 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -117,13 +117,14 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = dev_priv->de_irq_mask[pipe];
+ new_val = dev_priv->display.irq.de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->de_irq_mask[pipe]) {
- dev_priv->de_irq_mask[pipe] = new_val;
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
+ dev_priv->display.irq.de_irq_mask[pipe] = new_val;
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
+ dev_priv->display.irq.de_irq_mask[pipe]);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
}
}
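The shadow-mask update in bdw_update_pipe_irq() follows the usual IMR convention (a set bit masks the interrupt off): within interrupt_mask, bits end up set unless also present in enabled_irq_mask, and everything outside interrupt_mask is preserved. A worked example with 4-bit masks for brevity:

	/*
	 * shadow           = 0b1100  (bits 2,3 currently masked)
	 * interrupt_mask   = 0b0110  (touching bits 1,2)
	 * enabled_irq_mask = 0b0010  (bit 1 should be unmasked)
	 *
	 * new_val  = shadow & ~interrupt_mask;           -> 0b1000
	 * new_val |= ~enabled_irq_mask & interrupt_mask; -> 0b1100
	 *
	 * Result: bit 1 unmasked, bit 2 masked, bit 3 untouched.
	 */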
@@ -179,7 +180,7 @@ void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
+ u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -233,10 +234,10 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
- dev_priv->pipestat_irq_mask[pipe] |= status_mask;
+ dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
@@ -256,10 +257,10 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
+ if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
- dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
+ dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
@@ -401,7 +402,7 @@ void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->pipestat_irq_mask[pipe] = 0;
+ dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
}
}
@@ -412,7 +413,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
spin_lock(&dev_priv->irq_lock);
- if (!dev_priv->display_irqs_enabled) {
+ if (!dev_priv->display.irq.display_irqs_enabled) {
spin_unlock(&dev_priv->irq_lock);
return;
}
@@ -445,7 +446,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- status_mask |= dev_priv->pipestat_irq_mask[pipe];
+ status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
if (!status_mask)
continue;
@@ -1203,7 +1204,7 @@ int i8xx_enable_vblank(struct drm_crtc *crtc)
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc->dev);
/*
* Vblank interrupts fail to wake the device up from C2+.
@@ -1211,8 +1212,8 @@ int i915gm_enable_vblank(struct drm_crtc *crtc)
* the problem. There is a small power cost so we do this
* only when vblank interrupts are actually enabled.
*/
- if (dev_priv->vblank_enabled++ == 0)
- intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ if (i915->display.irq.vblank_enabled++ == 0)
+ intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
@@ -1315,12 +1316,12 @@ void i8xx_disable_vblank(struct drm_crtc *crtc)
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc->dev);
i8xx_disable_vblank(crtc);
- if (--dev_priv->vblank_enabled == 0)
- intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ if (--i915->display.irq.vblank_enabled == 0)
+ intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i965_disable_vblank(struct drm_crtc *crtc)
@@ -1497,8 +1498,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
- dev_priv->de_irq_mask[pipe],
- ~dev_priv->de_irq_mask[pipe] | extra_ier);
+ dev_priv->display.irq.de_irq_mask[pipe],
+ ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1558,10 +1559,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
return;
- dev_priv->display_irqs_enabled = true;
+ dev_priv->display.irq.display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
vlv_display_irq_reset(dev_priv);
@@ -1573,10 +1574,10 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (!dev_priv->display_irqs_enabled)
+ if (!dev_priv->display.irq.display_irqs_enabled)
return;
- dev_priv->display_irqs_enabled = false;
+ dev_priv->display.irq.display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
vlv_display_irq_reset(dev_priv);
@@ -1694,12 +1695,12 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
for_each_pipe(dev_priv, pipe) {
- dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
+ dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
- dev_priv->de_irq_mask[pipe],
+ dev_priv->display.irq.de_irq_mask[pipe],
de_pipe_enables);
}
@@ -1770,9 +1771,9 @@ void intel_display_irq_init(struct drm_i915_private *i915)
* domain. We defer setting up the display irqs in this case to the
* runtime pm.
*/
- i915->display_irqs_enabled = true;
+ i915->display.irq.display_irqs_enabled = true;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display_irqs_enabled = false;
+ i915->display.irq.display_irqs_enabled = false;
intel_hotplug_irq_init(i915);
}
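
The i915gm hunks above keep the existing refcount-gated workaround — render clock
gating is disabled only while at least one vblank interrupt user exists — and only
move the counter into display.irq. A minimal standalone sketch of that pattern,
assuming nothing about locking (the driver relies on the vblank core serializing
these paths); workaround_set() below is a stand-in for the SCPD0 write, not driver
code:

#include <stdio.h>

static int vblank_enabled;	/* mirrors display.irq.vblank_enabled */

/* Stand-in for the SCPD0 CSTATE_RENDER_CLOCK_GATE_DISABLE write. */
static void workaround_set(int on)
{
	printf("render clock gating workaround %s\n", on ? "on" : "off");
}

/* First user pays the small power cost of the workaround... */
static void enable_vblank(void)
{
	if (vblank_enabled++ == 0)
		workaround_set(1);
}

/* ...and the last user stops paying it, matching the comment that the
 * workaround is applied only while vblank interrupts are enabled. */
static void disable_vblank(void)
{
	if (--vblank_enabled == 0)
		workaround_set(0);
}

int main(void)
{
	enable_vblank();
	enable_vblank();	/* nested user: no extra register write */
	disable_vblank();
	disable_vblank();	/* back to zero: workaround disabled */
	return 0;
}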
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index 11e03cfb774d..1799a6643128 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -27,6 +27,10 @@ static struct intel_display_params intel_display_modparams __read_mostly = {
* debugfs mode to 0.
*/
+intel_display_param_named_unsafe(dmc_firmware_path, charp, 0400,
+ "DMC firmware path to use instead of the default one. "
+ "Use /dev/null to disable DMC and runtime PM.");
+
intel_display_param_named_unsafe(vbt_firmware, charp, 0400,
"Load VBT from specified file under /lib/firmware");
@@ -116,6 +120,11 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
"(0=disabled, 1=enabled) "
"Default: 1");
+intel_display_param_named_unsafe(enable_dmc_wl, bool, 0400,
+ "Enable DMC wakelock "
+ "(0=disabled, 1=enabled) "
+ "Default: 0");
+
__maybe_unused
static void _param_print_bool(struct drm_printer *p, const char *driver_name,
const char *name, bool val)
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 6206cc51df04..1208a62c16d2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -24,6 +24,7 @@ struct drm_i915_private;
* debugfs file
*/
#define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \
+ param(char *, dmc_firmware_path, NULL, 0400) \
param(char *, vbt_firmware, NULL, 0400) \
param(int, lvds_channel_mode, 0, 0400) \
param(int, panel_use_ssc, -1, 0600) \
@@ -46,6 +47,7 @@ struct drm_i915_private;
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
+ param(bool, enable_dmc_wl, false, 0400) \
#define MEMBER(T, member, ...) T member;
struct intel_display_params {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 6fd4fa52253a..03dc7edcc443 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -640,13 +640,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
- /*
- * The caller must hold already raw wakeref, upgrade that to a proper
- * wakeref to make the state checker happy about the HW access during
- * power well disabling.
- */
- assert_rpm_raw_wakeref_held(rpm);
- wakeref = intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get_noresume(rpm);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 06900ff307b2..83f616097a29 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -17,6 +17,7 @@
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dmc.h"
+#include "intel_dmc_wl.h"
#include "intel_dp_aux_regs.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
@@ -26,6 +27,7 @@
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
#include "vlv_sideband_reg.h"
@@ -199,6 +201,9 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
+#define ICL_AUX_PW_TO_PHY(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A)
+
#define ICL_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
@@ -217,27 +222,22 @@ static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
enum aux_ch aux_ch)
{
- struct intel_digital_port *dig_port = NULL;
struct intel_encoder *encoder;
for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_digital_port *dig_port;
+
/* We'll check the MST primary port */
if (encoder->type == INTEL_OUTPUT_DP_MST)
continue;
dig_port = enc_to_dig_port(encoder);
- if (!dig_port)
- continue;
-
- if (dig_port->aux_ch != aux_ch) {
- dig_port = NULL;
- continue;
- }
- break;
+ if (dig_port && dig_port->aux_ch == aux_ch)
+ return dig_port;
}
- return dig_port;
+ return NULL;
}
static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
@@ -253,7 +253,7 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
* as HDMI-only and routed to a combo PHY, the encoder either won't be
* present at all or it will not have an aux_ch assigned.
*/
- return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
+ return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
}
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
@@ -396,17 +396,11 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
+static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- struct intel_encoder *encoder;
-
- for_each_intel_encoder(&i915->drm, encoder) {
- if (encoder->type == INTEL_OUTPUT_EDP &&
- encoder->port == port)
- return true;
- }
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
- return false;
+ return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
}
static void
@@ -415,24 +409,25 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- /* FIXME this is a mess */
- if (phy != PHY_NONE)
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
- 0, ICL_LANE_ENABLE_AUX);
+ /*
+ * FIXME not sure if we should derive the PHY from the pw_idx, or
+ * from the VBT defined AUX_CH->DDI->PHY mapping.
+ */
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ 0, ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_port_is_edp(dev_priv, (enum port)phy))
- intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
- 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
+ !intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx)))
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
+ 0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
}
static void
@@ -441,14 +436,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- /* FIXME this is a mess */
- if (phy != PHY_NONE)
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
- ICL_LANE_ENABLE_AUX, 0);
+ /*
+ * FIXME not sure if we should derive the PHY from the pw_idx, or
+ * from the VBT defined AUX_CH->DDI->PHY mapping.
+ */
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ ICL_LANE_ENABLE_AUX, 0);
intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
@@ -827,6 +823,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_dmc_wl_enable(&dev_priv->display);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -856,6 +854,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_dmc_wl_enable(&dev_priv->display);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -906,39 +906,39 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ return bxt_dpio_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv)
{
struct i915_power_well *power_well;
power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
if (IS_GEMINILAKE(dev_priv)) {
power_well = lookup_power_well(dev_priv,
GLK_DISP_PW_DPIO_CMN_C);
if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(dev_priv,
+ i915_power_well_instance(power_well)->bxt.phy);
}
}
@@ -976,16 +976,18 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
+ intel_dmc_wl_disable(&dev_priv->display);
+
intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(&dev_priv->drm,
- intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
+ intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw,
&cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_verify_ddi_phy_power_wells(dev_priv);
+ bxt_verify_dpio_phy_power_wells(dev_priv);
if (DISPLAY_VER(dev_priv) >= 11)
/*
@@ -1396,8 +1398,8 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
- phy_status_mask, phy_status, 10))
+ if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10))
drm_err(&dev_priv->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
@@ -1441,9 +1443,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
@@ -1519,9 +1521,9 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
return;
if (ch == DPIO_CH0)
- reg = _CHV_CMN_DW0_CH0;
+ reg = CHV_CMN_DW0_CH0;
else
- reg = _CHV_CMN_DW6_CH1;
+ reg = CHV_CMN_DW6_CH1;
vlv_dpio_get(dev_priv);
val = vlv_dpio_read(dev_priv, phy, reg);
@@ -1552,10 +1554,11 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
}
if (ch == DPIO_CH0)
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+ actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 |
+ DPIO_ALLDL_POWERDOWN_CH0, val);
else
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
- actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+ actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
+ DPIO_ALLDL_POWERDOWN_CH1, val);
drm_WARN(&dev_priv->drm, actual != expected,
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
index 2f07b7afa3bf..b83ad06f2ea7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
@@ -29,21 +29,21 @@
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_BASE_PIPE3(base, pipe, a, b, c) _MMIO((base) + _PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_BASE_PORT3(base, pipe, a, b, c) _MMIO((base) + _PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
/*
* Device info offset array based helpers for groups of registers with unevenly
* spaced base offsets.
*/
-#define _MMIO_PIPE2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->pipe_offsets[(pipe)] - \
- DISPLAY_INFO(dev_priv)->pipe_offsets[PIPE_A] + \
- DISPLAY_MMIO_BASE(dev_priv) + (reg))
-#define _MMIO_TRANS2(tran, reg) _MMIO(DISPLAY_INFO(dev_priv)->trans_offsets[(tran)] - \
- DISPLAY_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + \
- DISPLAY_MMIO_BASE(dev_priv) + (reg))
-#define _MMIO_CURSOR2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->cursor_offsets[(pipe)] - \
- DISPLAY_INFO(dev_priv)->cursor_offsets[PIPE_A] + \
- DISPLAY_MMIO_BASE(dev_priv) + (reg))
+#define _MMIO_PIPE2(display, pipe, reg) _MMIO(DISPLAY_INFO(display)->pipe_offsets[(pipe)] - \
+ DISPLAY_INFO(display)->pipe_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(display) + (reg))
+#define _MMIO_TRANS2(display, tran, reg) _MMIO(DISPLAY_INFO(display)->trans_offsets[(tran)] - \
+ DISPLAY_INFO(display)->trans_offsets[TRANSCODER_A] + \
+ DISPLAY_MMIO_BASE(display) + (reg))
+#define _MMIO_CURSOR2(display, pipe, reg) _MMIO(DISPLAY_INFO(display)->cursor_offsets[(pipe)] - \
+ DISPLAY_INFO(display)->cursor_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(display) + (reg))
#endif /* __INTEL_DISPLAY_REG_DEFS_H__ */
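
The rework above threads an explicit display argument through _MMIO_PIPE2() and
friends; the address arithmetic itself is unchanged. A minimal user-space sketch of
that arithmetic, with made-up tables (the 0x70000/0x71000 offsets and the zero MMIO
base are illustrative assumptions, and reg is a pipe-A-relative register address,
as in the driver):

#include <stdint.h>
#include <stdio.h>

#define PIPE_A 0
#define PIPE_B 1

/* Hypothetical per-pipe offset table and MMIO base. */
static const uint32_t pipe_offsets[] = { 0x70000, 0x71000 };
static const uint32_t display_mmio_base = 0x0;

/* Mirrors _MMIO_PIPE2(): rebase the pipe offset against PIPE_A, then
 * add the display MMIO base and the (pipe A relative) register. */
static uint32_t mmio_pipe2(int pipe, uint32_t reg)
{
	return pipe_offsets[pipe] - pipe_offsets[PIPE_A] +
	       display_mmio_base + reg;
}

int main(void)
{
	/* Prints 0x70008 for pipe A and 0x71008 for pipe B. */
	printf("0x%x 0x%x\n", mmio_pipe2(PIPE_A, 0x70008),
	       mmio_pipe2(PIPE_B, 0x70008));
	return 0;
}

Rebasing against PIPE_A is what lets the same pipe-A register literals work for
every pipe, even on platforms whose offset tables are unevenly spaced.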
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index e67cd5b02e84..62f7a30c37dc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -661,7 +661,8 @@ struct intel_digital_connector_state {
int broadcast_rgb;
};
-#define to_intel_digital_connector_state(x) container_of(x, struct intel_digital_connector_state, base)
+#define to_intel_digital_connector_state(conn_state) \
+ container_of_const((conn_state), struct intel_digital_connector_state, base)
struct dpll {
/* given values */
@@ -727,6 +728,7 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
+ u32 phys_dma_addr; /* for cursor_needs_physical */
/* Plane pxp decryption state */
bool decrypt;
@@ -1002,18 +1004,6 @@ enum intel_output_format {
INTEL_OUTPUT_FORMAT_YCBCR444,
};
-struct intel_mpllb_state {
- u32 clock; /* in KHz */
- u32 ref_control;
- u32 mpllb_cp;
- u32 mpllb_div;
- u32 mpllb_div2;
- u32 mpllb_fracn1;
- u32 mpllb_fracn2;
- u32 mpllb_sscen;
- u32 mpllb_sscstep;
-};
-
/* Used by dp and fdi links */
struct intel_link_m_n {
u32 tu;
@@ -1029,31 +1019,6 @@ struct intel_csc_matrix {
u16 postoff[3];
};
-struct intel_c10pll_state {
- u32 clock; /* in KHz */
- u8 tx;
- u8 cmn;
- u8 pll[20];
-};
-
-struct intel_c20pll_state {
- u32 clock; /* in kHz */
- u16 tx[3];
- u16 cmn[4];
- union {
- u16 mplla[10];
- u16 mpllb[11];
- };
-};
-
-struct intel_cx0pll_state {
- union {
- struct intel_c10pll_state c10;
- struct intel_c20pll_state c20;
- };
- bool ssc_enabled;
-};
-
struct intel_crtc_state {
/*
* uapi (drm) state. This is the software state shown to userspace.
@@ -1198,11 +1163,7 @@ struct intel_crtc_state {
struct intel_shared_dpll *shared_dpll;
/* Actual register state of the dpll, for shared dpll cross-checking. */
- union {
- struct intel_dpll_hw_state dpll_hw_state;
- struct intel_mpllb_state mpllb_state;
- struct intel_cx0pll_state cx0pll_state;
- };
+ struct intel_dpll_hw_state dpll_hw_state;
/*
* ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
@@ -1345,6 +1306,7 @@ struct intel_crtc_state {
union hdmi_infoframe hdmi;
union hdmi_infoframe drm;
struct drm_dp_vsc_sdp vsc;
+ struct drm_dp_as_sdp as_sdp;
} infoframes;
u8 eld[MAX_ELD_BYTES];
@@ -1422,6 +1384,8 @@ struct intel_crtc_state {
u32 psr2_man_track_ctl;
+ u32 pipe_srcsz_early_tpt;
+
struct drm_rect psr2_su_area;
/* Variable Refresh Rate state */
@@ -1429,6 +1393,7 @@ struct intel_crtc_state {
bool enable, in_range;
u8 pipeline_full;
u16 flipline, vmin, vmax, guardband;
+ u32 vsync_end, vsync_start;
} vrr;
/* Stream Splitter for eDP MSO */
@@ -1617,12 +1582,17 @@ struct intel_watermark_params {
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
-#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi)
+
+#define to_intel_crtc_state(crtc_state) \
+ container_of_const((crtc_state), struct intel_crtc_state, uapi)
+#define to_intel_plane_state(plane_state) \
+ container_of_const((plane_state), struct intel_plane_state, uapi)
+#define to_intel_framebuffer(fb) \
+ container_of_const((fb), struct intel_framebuffer, base)
+
#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
@@ -1737,6 +1707,8 @@ struct intel_psr {
/* LNL and beyond */
u8 check_entry_lines;
+ u8 silence_period_sym_clocks;
+ u8 lfps_half_cycle_num_of_syms;
} alpm_parameters;
ktime_t last_entry_attempt;
@@ -1798,6 +1770,7 @@ struct intel_dp {
bool is_mst;
int active_mst_links;
+ enum drm_dp_mst_mode mst_detect;
/* connector directly attached - won't be use for modeset in mst world */
struct intel_connector *attached_connector;
@@ -2183,4 +2156,41 @@ static inline int to_bpp_x16(int bpp)
return bpp << 4;
}
+/*
+ * Conversion functions/macros from various pointer types to struct
+ * intel_display pointer.
+ */
+#define __drm_device_to_intel_display(p) \
+ (&to_i915(p)->display)
+#define __intel_connector_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_crtc_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_crtc_state_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->uapi.crtc->dev)
+#define __intel_digital_port_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.base.dev)
+#define __intel_dp_to_intel_display(p) \
+ __drm_device_to_intel_display(dp_to_dig_port(p)->base.base.dev)
+#define __intel_encoder_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_hdmi_to_intel_display(p) \
+ __drm_device_to_intel_display(hdmi_to_dig_port(p)->base.base.dev)
+
+/* Helper for generic association. Map types to conversion functions/macros. */
+#define __assoc(type, p) \
+ struct type: __##type##_to_intel_display((struct type *)(p))
+
+/* Convert various pointer types to struct intel_display pointer. */
+#define to_intel_display(p) \
+ _Generic(*p, \
+ __assoc(drm_device, p), \
+ __assoc(intel_connector, p), \
+ __assoc(intel_crtc, p), \
+ __assoc(intel_crtc_state, p), \
+ __assoc(intel_digital_port, p), \
+ __assoc(intel_dp, p), \
+ __assoc(intel_encoder, p), \
+ __assoc(intel_hdmi, p))
+
#endif /* __INTEL_DISPLAY_TYPES_H__ */
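
The _Generic() association table above gives one polymorphic to_intel_display()
entry point for eight pointer types. A self-contained C11 sketch of the same
technique with two toy types (all names below are illustrative, not from the
patch); note the (struct type *)(p) casts, which keep every branch well-formed no
matter which pointer type is actually passed:

#include <stdio.h>

struct intel_display { int id; };
struct drm_device { struct intel_display display; };
struct intel_crtc { struct drm_device *dev; };

#define __drm_device_to_display(p) (&(p)->display)
#define __intel_crtc_to_display(p) (&(p)->dev->display)

/* One name, many pointer types: _Generic() picks the conversion
 * from the static type of *p, exactly like to_intel_display(). */
#define to_display(p) _Generic(*(p), \
	struct drm_device: __drm_device_to_display((struct drm_device *)(p)), \
	struct intel_crtc: __intel_crtc_to_display((struct intel_crtc *)(p)))

int main(void)
{
	struct drm_device dev = { .display = { .id = 42 } };
	struct intel_crtc crtc = { .dev = &dev };

	/* Both resolve to the same struct intel_display; prints "42 42". */
	printf("%d %d\n", to_display(&dev)->id, to_display(&crtc)->id);
	return 0;
}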
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index ac136fd992ba..e5a8022db664 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -10,20 +10,12 @@
static void gen11_display_wa_apply(struct drm_i915_private *i915)
{
- /* Wa_1409120013 */
- intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
/* Wa_14010594013 */
intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
}
static void xe_d_display_wa_apply(struct drm_i915_private *i915)
{
- /* Wa_1409120013 */
- intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
/* Wa_14013723622 */
intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 835781624482..cbd2ac5671b1 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include "i915_drv.h"
@@ -38,6 +39,8 @@
* low-power state and comes back to normal.
*/
+#define INTEL_DMC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
+
enum intel_dmc_id {
DMC_FW_MAIN = 0,
DMC_FW_PIPEA,
@@ -71,6 +74,21 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
return i915->display.dmc.dmc;
}
+static const char *dmc_firmware_param(struct drm_i915_private *i915)
+{
+ const char *p = i915->display.params.dmc_firmware_path;
+
+ return p && *p ? p : NULL;
+}
+
+static bool dmc_firmware_param_disabled(struct drm_i915_private *i915)
+{
+ const char *p = dmc_firmware_param(i915);
+
+ /* Magic path to indicate disabled */
+ return p && !strcmp(p, "/dev/null");
+}
+
#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
@@ -89,10 +107,14 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
__stringify(major) "_" \
__stringify(minor) ".bin"
+#define XE2LPD_DMC_MAX_FW_SIZE 0x8000
#define XELPDP_DMC_MAX_FW_SIZE 0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define XE2LPD_DMC_PATH DMC_PATH(xe2lpd)
+MODULE_FIRMWARE(XE2LPD_DMC_PATH);
+
#define MTL_DMC_PATH DMC_PATH(mtl)
MODULE_FIRMWARE(MTL_DMC_PATH);
@@ -136,6 +158,59 @@ MODULE_FIRMWARE(SKL_DMC_PATH);
#define BXT_DMC_MAX_FW_SIZE 0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);
+static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size)
+{
+ const char *fw_path = NULL;
+ u32 max_fw_size = 0;
+
+ if (DISPLAY_VER_FULL(i915) == IP_VER(20, 0)) {
+ fw_path = XE2LPD_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
+ fw_path = MTL_DMC_PATH;
+ max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
+ } else if (IS_DG2(i915)) {
+ fw_path = DG2_DMC_PATH;
+ max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_P(i915)) {
+ fw_path = ADLP_DMC_PATH;
+ max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_S(i915)) {
+ fw_path = ADLS_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_DG1(i915)) {
+ fw_path = DG1_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_ROCKETLAKE(i915)) {
+ fw_path = RKL_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_TIGERLAKE(i915)) {
+ fw_path = TGL_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VER(i915) == 11) {
+ fw_path = ICL_DMC_PATH;
+ max_fw_size = ICL_DMC_MAX_FW_SIZE;
+ } else if (IS_GEMINILAKE(i915)) {
+ fw_path = GLK_DMC_PATH;
+ max_fw_size = GLK_DMC_MAX_FW_SIZE;
+ } else if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
+ fw_path = KBL_DMC_PATH;
+ max_fw_size = KBL_DMC_MAX_FW_SIZE;
+ } else if (IS_SKYLAKE(i915)) {
+ fw_path = SKL_DMC_PATH;
+ max_fw_size = SKL_DMC_MAX_FW_SIZE;
+ } else if (IS_BROXTON(i915)) {
+ fw_path = BXT_DMC_PATH;
+ max_fw_size = BXT_DMC_MAX_FW_SIZE;
+ }
+
+ *size = max_fw_size;
+
+ return fw_path;
+}
+
#define DMC_DEFAULT_FW_OFFSET 0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES 20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
@@ -546,6 +621,8 @@ void intel_dmc_disable_program(struct drm_i915_private *i915)
pipedmc_clock_gating_wa(i915, true);
disable_all_event_handlers(i915);
pipedmc_clock_gating_wa(i915, false);
+
+ intel_dmc_wl_disable(&i915->display);
}
void assert_dmc_loaded(struct drm_i915_private *i915)
@@ -845,7 +922,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
return sizeof(struct intel_css_header);
}
-static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
+static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
struct drm_i915_private *i915 = dmc->i915;
struct intel_css_header *css_header;
@@ -858,13 +935,13 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
u32 r, offset;
if (!fw)
- return;
+ return -EINVAL;
/* Extract CSS Header information */
css_header = (struct intel_css_header *)fw->data;
r = parse_dmc_fw_css(dmc, css_header, fw->size);
if (!r)
- return;
+ return -EINVAL;
readcount += r;
@@ -872,7 +949,7 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
package_header = (struct intel_package_header *)&fw->data[readcount];
r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
if (!r)
- return;
+ return -EINVAL;
readcount += r;
@@ -889,6 +966,13 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
}
+
+ if (!intel_dmc_has_payload(i915)) {
+ drm_err(&i915->drm, "DMC firmware main program not found\n");
+ return -ENOENT;
+ }
+
+ return 0;
}
static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
@@ -923,7 +1007,7 @@ static void dmc_load_work_fn(struct work_struct *work)
err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);
- if (err == -ENOENT && !i915->params.dmc_firmware_path) {
+ if (err == -ENOENT && !dmc_firmware_param(i915)) {
fallback_path = dmc_fallback_path(i915);
if (fallback_path) {
drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
@@ -934,24 +1018,31 @@ static void dmc_load_work_fn(struct work_struct *work)
}
}
- parse_dmc_fw(dmc, fw);
-
- if (intel_dmc_has_payload(i915)) {
- intel_dmc_load_program(i915);
- intel_dmc_runtime_pm_put(i915);
-
- drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
- dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
- } else {
+ if (err) {
drm_notice(&i915->drm,
- "Failed to load DMC firmware %s."
- " Disabling runtime power management.\n",
- dmc->fw_path);
+ "Failed to load DMC firmware %s (%pe). Disabling runtime power management.\n",
+ dmc->fw_path, ERR_PTR(err));
drm_notice(&i915->drm, "DMC firmware homepage: %s",
- INTEL_UC_FIRMWARE_URL);
+ INTEL_DMC_FIRMWARE_URL);
+ return;
}
+ err = parse_dmc_fw(dmc, fw);
+ if (err) {
+ drm_notice(&i915->drm,
+ "Failed to parse DMC firmware %s (%pe). Disabling runtime power management.\n",
+ dmc->fw_path, ERR_PTR(err));
+ goto out;
+ }
+
+ intel_dmc_load_program(i915);
+ intel_dmc_runtime_pm_put(i915);
+
+ drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
+ dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
+
+out:
release_firmware(fw);
}
@@ -987,56 +1078,16 @@ void intel_dmc_init(struct drm_i915_private *i915)
INIT_WORK(&dmc->work, dmc_load_work_fn);
- if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
- dmc->fw_path = MTL_DMC_PATH;
- dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
- } else if (IS_DG2(i915)) {
- dmc->fw_path = DG2_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(i915)) {
- dmc->fw_path = ADLP_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(i915)) {
- dmc->fw_path = ADLS_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(i915)) {
- dmc->fw_path = DG1_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(i915)) {
- dmc->fw_path = RKL_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(i915)) {
- dmc->fw_path = TGL_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER(i915) == 11) {
- dmc->fw_path = ICL_DMC_PATH;
- dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(i915)) {
- dmc->fw_path = GLK_DMC_PATH;
- dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915)) {
- dmc->fw_path = KBL_DMC_PATH;
- dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(i915)) {
- dmc->fw_path = SKL_DMC_PATH;
- dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(i915)) {
- dmc->fw_path = BXT_DMC_PATH;
- dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
- }
-
- if (i915->params.dmc_firmware_path) {
- if (strlen(i915->params.dmc_firmware_path) == 0) {
- drm_info(&i915->drm,
- "Disabling DMC firmware and runtime PM\n");
- goto out;
- }
+ dmc->fw_path = dmc_firmware_default(i915, &dmc->max_fw_size);
- dmc->fw_path = i915->params.dmc_firmware_path;
+ if (dmc_firmware_param_disabled(i915)) {
+ drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n");
+ goto out;
}
+ if (dmc_firmware_param(i915))
+ dmc->fw_path = dmc_firmware_param(i915);
+
if (!dmc->fw_path) {
drm_dbg_kms(&i915->drm,
"No known DMC firmware for platform, disabling runtime PM\n");
@@ -1072,6 +1123,8 @@ void intel_dmc_suspend(struct drm_i915_private *i915)
if (dmc)
flush_work(&dmc->work);
+ intel_dmc_wl_disable(&i915->display);
+
/* Drop the reference held in case DMC isn't loaded. */
if (!intel_dmc_has_payload(i915))
intel_dmc_runtime_pm_put(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 90d0dbb41cfe..1bf446f96a10 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -97,4 +97,10 @@
#define TGL_DMC_DEBUG3 _MMIO(0x101090)
#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
+#define DMC_WAKELOCK_CFG _MMIO(0x8F1B0)
+#define DMC_WAKELOCK_CFG_ENABLE REG_BIT(31)
+#define DMC_WAKELOCK1_CTL _MMIO(0x8F140)
+#define DMC_WAKELOCK_CTL_REQ REG_BIT(31)
+#define DMC_WAKELOCK_CTL_ACK REG_BIT(15)
+
#endif /* __INTEL_DMC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
new file mode 100644
index 000000000000..d9864b9cc429
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include "intel_de.h"
+#include "intel_dmc.h"
+#include "intel_dmc_regs.h"
+#include "intel_dmc_wl.h"
+
+/**
+ * DOC: DMC wakelock support
+ *
+ * Wake lock is the mechanism to cause the display engine to exit DC
+ * states to allow programming of registers that are powered down in
+ * those states. Previous projects exited DC states automatically when
+ * detecting register programming. Now software controls the exit by
+ * programming the wake lock. This improves system performance and
+ * system interactions and better fits the flip queue style of
+ * programming. The wake lock is only required when DC5, DC6, or DC6v
+ * has been enabled in DC_STATE_EN and the wake lock mode of operation
+ * has been enabled.
+ *
+ * The wakelock mechanism in DMC allows the display engine to exit DC
+ * states explicitly before programming registers that may be powered
+ * down. In earlier hardware, this was done automatically and
+ * implicitly when the display engine accessed a register. With the
+ * wakelock implementation, the driver asserts a wakelock in DMC,
+ * which forces the hardware to exit the DC state until the wakelock
+ * is deasserted.
+ *
+ * The mechanism can be enabled and disabled by writing to the
+ * DMC_WAKELOCK_CFG register. There are also 13 control registers
+ * that can be used to hold and release different wakelocks. In the
+ * current implementation, we only need one wakelock, so only
+ * DMC_WAKELOCK1_CTL is used. The other definitions are here for
+ * potential future use.
+ */
+
+#define DMC_WAKELOCK_CTL_TIMEOUT 5
+#define DMC_WAKELOCK_HOLD_TIME 50
+
+struct intel_dmc_wl_range {
+ u32 start;
+ u32 end;
+};
+
+static struct intel_dmc_wl_range lnl_wl_range[] = {
+ { .start = 0x60000, .end = 0x7ffff },
+};
+
+static void __intel_dmc_wl_release(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_dmc_wl *wl = &display->wl;
+
+ WARN_ON(refcount_read(&wl->refcount));
+
+ queue_delayed_work(i915->unordered_wq, &wl->work,
+ msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
+}
+
+static void intel_dmc_wl_work(struct work_struct *work)
+{
+ struct intel_dmc_wl *wl =
+ container_of(work, struct intel_dmc_wl, work.work);
+ struct intel_display *display =
+ container_of(wl, struct intel_display, wl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ /* Bail out if refcount reached zero while waiting for the spinlock */
+ if (!refcount_read(&wl->refcount))
+ goto out_unlock;
+
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+
+ if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK, 0,
+ DMC_WAKELOCK_CTL_TIMEOUT)) {
+ WARN_RATELIMIT(1, "DMC wakelock release timed out");
+ goto out_unlock;
+ }
+
+ wl->taken = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+static bool intel_dmc_wl_check_range(u32 address)
+{
+ int i;
+ bool wl_needed = false;
+
+ for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
+ if (address >= lnl_wl_range[i].start &&
+ address <= lnl_wl_range[i].end) {
+ wl_needed = true;
+ break;
+ }
+ }
+
+ return wl_needed;
+}
+
+static bool __intel_dmc_wl_supported(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) < 20 ||
+ !intel_dmc_has_payload(i915) ||
+ !display->params.enable_dmc_wl)
+ return false;
+
+ return true;
+}
+
+void intel_dmc_wl_init(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+
+ /* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
+ if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl)
+ return;
+
+ INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
+ spin_lock_init(&wl->lock);
+ refcount_set(&wl->refcount, 0);
+}
+
+void intel_dmc_wl_enable(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (wl->enabled)
+ goto out_unlock;
+
+ /*
+ * Enable wakelock in DMC. We shouldn't try to take the
+ * wakelock, because we're just enabling it, so use the
+ * no-wakelock (_nowl) register helper directly here.
+ */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
+
+ wl->enabled = true;
+ wl->taken = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+void intel_dmc_wl_disable(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ flush_delayed_work(&wl->work);
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (!wl->enabled)
+ goto out_unlock;
+
+ /* Disable wakelock in DMC */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
+
+ refcount_set(&wl->refcount, 0);
+ wl->enabled = false;
+ wl->taken = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ if (!intel_dmc_wl_check_range(reg.reg))
+ return;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (!wl->enabled)
+ goto out_unlock;
+
+ cancel_delayed_work(&wl->work);
+
+ if (refcount_inc_not_zero(&wl->refcount))
+ goto out_unlock;
+
+ refcount_set(&wl->refcount, 1);
+
+ /*
+ * Only try to take the wakelock if it's not marked as taken
+ * yet. It may still be taken at this point if the last
+ * reference was released earlier but the delayed release
+ * work has not run yet.
+ */
+ if (!wl->taken) {
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
+ DMC_WAKELOCK_CTL_REQ);
+
+ if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_TIMEOUT)) {
+ WARN_RATELIMIT(1, "DMC wakelock ack timed out");
+ goto out_unlock;
+ }
+
+ wl->taken = true;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ if (!intel_dmc_wl_check_range(reg.reg))
+ return;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (!wl->enabled)
+ goto out_unlock;
+
+ if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
+ "Tried to put wakelock with refcount zero\n"))
+ goto out_unlock;
+
+ if (refcount_dec_and_test(&wl->refcount)) {
+ __intel_dmc_wl_release(display);
+
+ goto out_unlock;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
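
Taken together with the DOC comment above, the refcounting means callers just
bracket their register access and the DC-state exit/re-entry is handled behind the
scenes. A hedged sketch of a hypothetical caller — __raw_read() is a placeholder,
not a real helper; in the driver it is the intel_de_*() accessors that grow these
calls:

/* Hypothetical caller, for illustration only. */
static u32 example_read(struct intel_display *display, i915_reg_t reg)
{
	u32 val;

	/* First reference raises DMC_WAKELOCK_CTL_REQ and waits for ACK,
	 * holding the display engine out of DC5/DC6 for registers in the
	 * protected 0x60000-0x7ffff range. */
	intel_dmc_wl_get(display, reg);

	val = __raw_read(display, reg);	/* placeholder for the real MMIO read */

	/* Dropping the last reference doesn't release immediately: the
	 * delayed work deasserts REQ after DMC_WAKELOCK_HOLD_TIME (50 ms),
	 * so bursts of accesses don't thrash the wakelock. */
	intel_dmc_wl_put(display, reg);

	return val;
}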
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.h b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
new file mode 100644
index 000000000000..adab51208d0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_WAKELOCK_H__
+#define __INTEL_WAKELOCK_H__
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/refcount.h>
+
+#include "i915_reg_defs.h"
+
+struct intel_display;
+
+struct intel_dmc_wl {
+ spinlock_t lock; /* protects enabled, taken and refcount */
+ bool enabled;
+ bool taken;
+ refcount_t refcount;
+ struct delayed_work work;
+};
+
+void intel_dmc_wl_init(struct intel_display *display);
+void intel_dmc_wl_enable(struct intel_display *display);
+void intel_dmc_wl_disable(struct intel_display *display);
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg);
+void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg);
+
+#endif /* __INTEL_WAKELOCK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f0c3ed37b350..e05e25cd4a94 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -67,6 +67,7 @@
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
+#include "intel_drrs.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -88,6 +89,9 @@
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
+/* Max DSC line buffer depth supported by HW. */
+#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH 13
+
/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530
@@ -122,6 +126,14 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return dig_port->base.type == INTEL_OUTPUT_EDP;
}
+bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return HAS_AS_SDP(i915) &&
+ drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
+}
+
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* Is link rate UHBR and thus 128b/132b? */
@@ -213,7 +225,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
* Sink rates for 128b/132b. If set, sink should support all 8b/10b
* rates and 10 Gbps.
*/
- if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
+ if (drm_dp_128b132b_supported(intel_dp->dpcd)) {
u8 uhbr_rates = 0;
BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);
@@ -424,7 +436,7 @@ int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
return max_rate;
}
-bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
+bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
@@ -442,11 +454,9 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
+ if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -462,11 +472,9 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
static int mtl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
return 810000;
return 2000000;
@@ -498,7 +506,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
/* The values must be in increasing order */
static const int mtl_rates[] = {
162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
- 810000, 1000000, 1350000, 2000000,
+ 810000, 1000000, 2000000,
};
static const int icl_rates[] = {
162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
@@ -1197,15 +1205,15 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
}
bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
int hdisplay, int clock)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct intel_connector *connector = intel_dp->attached_connector;
- if (!intel_dp_can_bigjoiner(intel_dp))
+ if (!intel_dp_has_bigjoiner(intel_dp))
return false;
- return clock > i915->max_dotclk_freq || hdisplay > 5120 ||
+ return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 ||
connector->force_bigjoiner_enable;
}
@@ -1219,7 +1227,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk = dev_priv->max_dotclk_freq;
+ int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
u16 dsc_max_compressed_bpp = 0;
u8 dsc_slice_count = 0;
enum drm_mode_status status;
@@ -1232,6 +1240,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
fixed_mode = intel_panel_fixed_mode(connector, mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
status = intel_panel_mode_valid(connector, mode);
@@ -1241,10 +1252,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock = fixed_mode->clock;
}
- if (mode->clock < 10000)
- return MODE_CLOCK_LOW;
-
- if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ if (intel_dp_need_bigjoiner(intel_dp, connector,
+ mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
}
@@ -1305,11 +1314,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- /*
- * Big joiner configuration needs DSC for TGL which is not true for
- * XE_LPD where uncompressed joiner is supported.
- */
- if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
+ if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc)
return MODE_CLOCK_HIGH;
if (mode_rate > max_rate && !dsc)
@@ -1421,7 +1426,8 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12)
return true;
- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
+ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+ !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
return false;
@@ -1702,7 +1708,6 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
- u8 line_buf_depth;
int ret;
/*
@@ -1731,20 +1736,14 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
DP_DSC_RGB;
- line_buf_depth = drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd);
- if (!line_buf_depth) {
+ vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH,
+ drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd));
+ if (!vdsc_cfg->line_buf_depth) {
drm_dbg_kms(&i915->drm,
"DSC Sink Line Buffer Depth invalid\n");
return -EINVAL;
}
- if (vdsc_cfg->dsc_version_minor == 2)
- vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
- else
- vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
-
vdsc_cfg->block_pred_enable =
connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
@@ -1916,8 +1915,9 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp ||
- valid_dsc_bpp[i] > dsc_max_bpp)
+ if (valid_dsc_bpp[i] < dsc_min_bpp)
+ continue;
+ if (valid_dsc_bpp[i] > dsc_max_bpp)
break;
ret = dsc_compute_link_config(intel_dp,
@@ -2398,6 +2398,16 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
}
+bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner)
+{
+ /*
+ * The pipe joiner needs compression up to display 12 due to
+ * bandwidth limitations. From DG2 onwards the pipe joiner can be
+ * enabled without compression.
+ */
+ return DISPLAY_VER(i915) < 13 && use_joiner;
+}
+
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2406,30 +2416,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- const struct intel_connector *connector =
+ struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
- bool joiner_needs_dsc = false;
- bool dsc_needed;
+ bool dsc_needed, joiner_needs_dsc;
int ret = 0;
if (pipe_config->fec_enable &&
!intel_dp_supports_fec(intel_dp, connector, pipe_config))
return -EINVAL;
- if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
+ if (intel_dp_need_bigjoiner(intel_dp, connector,
+ adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_clock))
pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
- /*
- * Pipe joiner needs compression up to display 12 due to bandwidth
- * limitation. DG2 onwards pipe joiner can be enabled without
- * compression.
- */
- joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->bigjoiner_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_compute_config_limits(intel_dp, pipe_config,
@@ -2612,6 +2617,29 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
+static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!crtc_state->vrr.enable ||
+ !intel_dp_as_sdp_supported(intel_dp))
+ return;
+
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
+
+ /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
+ as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
+ as_sdp->length = 0x9;
+ as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
+ as_sdp->vtotal = adjusted_mode->vtotal;
+ as_sdp->target_rr = 0;
+ as_sdp->duration_incr_ms = 0;
+}
+
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -2683,15 +2711,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
-static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- if (HAS_DOUBLE_BUFFERED_M_N(i915))
- return true;
-
- return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
-}
-
static bool can_enable_drrs(struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
const struct drm_display_mode *downclock_mode)
@@ -2714,7 +2733,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
if (pipe_config->has_pch_encoder)
return false;
- if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
+ if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
return false;
return downclock_mode &&
@@ -2731,7 +2750,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
- if (has_seamless_m_n(connector))
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that when updating M/N live.
+ */
+ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
@@ -2972,6 +2995,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
g4x_dp_set_clock(encoder, pipe_config);
intel_vrr_compute_config(pipe_config, conn_state);
+ intel_dp_compute_as_sdp(intel_dp, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
@@ -3364,6 +3388,14 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
fastset = false;
}
+ if (CAN_PANEL_REPLAY(intel_dp)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
+ encoder->base.base.id, encoder->base.name);
+ crtc_state->uapi.mode_changed = true;
+ fastset = false;
+ }
+
return fastset;
}
@@ -4047,39 +4079,84 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp->downstream_ports) == 0;
}
-static bool
-intel_dp_can_mst(struct intel_dp *intel_dp)
+static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode)
+{
+ if (mst_mode == DRM_DP_MST)
+ return "MST";
+ else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG)
+ return "SST w/ sideband messaging";
+ else
+ return "SST";
+}
+
+static enum drm_dp_mst_mode
+intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
+ enum drm_dp_mst_mode sink_mst_mode)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- return i915->display.params.enable_dp_mst &&
- intel_dp_mst_source_support(intel_dp) &&
- drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+ if (!i915->display.params.enable_dp_mst)
+ return DRM_DP_SST;
+
+ if (!intel_dp_mst_source_support(intel_dp))
+ return DRM_DP_SST;
+
+ if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG &&
+ !(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B))
+ return DRM_DP_SST;
+
+ return sink_mst_mode;
}
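As a quick sanity check of the policy above, a sketch of the mode the helper picks for a few input combinations (inferred from the code; SST with sideband messaging only exists on 128b/132b links, hence the DP_CAP_ANSI_128B132B gate):

 *  enable_dp_mst  source MST ok  sink reports               -> chosen mode
 *  false          any            any                        -> DRM_DP_SST
 *  true           no             any                        -> DRM_DP_SST
 *  true           yes            SST w/ sideband msg, sink
 *                                lacks DP_CAP_ANSI_128B132B -> DRM_DP_SST
 *  true           yes            DRM_DP_MST                 -> DRM_DP_MST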
-static void
-intel_dp_configure_mst(struct intel_dp *intel_dp)
+static enum drm_dp_mst_mode
+intel_dp_mst_detect(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct intel_encoder *encoder =
- &dp_to_dig_port(intel_dp)->base;
- bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum drm_dp_mst_mode sink_mst_mode;
+ enum drm_dp_mst_mode mst_detect;
+
+ sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+
+ mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode);
drm_dbg_kms(&i915->drm,
- "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n",
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp_mst_source_support(intel_dp)),
- str_yes_no(sink_can_mst),
- str_yes_no(i915->display.params.enable_dp_mst));
+ intel_dp_mst_mode_str(sink_mst_mode),
+ str_yes_no(i915->display.params.enable_dp_mst),
+ intel_dp_mst_mode_str(mst_detect));
+
+ return mst_detect;
+}
+
+static void
+intel_dp_mst_configure(struct intel_dp *intel_dp)
+{
if (!intel_dp_mst_source_support(intel_dp))
return;
- intel_dp->is_mst = sink_can_mst &&
- i915->display.params.enable_dp_mst;
+ intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+
+ /* Avoid stale info on the next detect cycle. */
+ intel_dp->mst_detect = DRM_DP_SST;
+}
+
+static void
+intel_dp_mst_disconnect(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!intel_dp->is_mst)
+ return;
+
+ drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
}
static bool
@@ -4127,6 +4204,32 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
return false;
}
+static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
+ struct dp_sdp *sdp, size_t size)
+{
+ size_t length = sizeof(struct dp_sdp);
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
+
+ /* Prepare AS (Adaptive Sync) SDP Header */
+ sdp->sdp_header.HB0 = 0;
+ sdp->sdp_header.HB1 = as_sdp->sdp_type;
+ sdp->sdp_header.HB2 = 0x02;
+ sdp->sdp_header.HB3 = as_sdp->length;
+
+ /* Fill AS (Adaptive Sync) SDP Payload */
+ sdp->db[0] = as_sdp->mode;
+ sdp->db[1] = as_sdp->vtotal & 0xFF;
+ sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF;
+ sdp->db[3] = as_sdp->target_rr & 0xFF;
+ sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3;
+
+ return length;
+}
+
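A minimal standalone sketch of the SDP byte layout produced by the pack helper above, useful for cross-checking against an analyzer dump. The numeric values of DP_SDP_ADAPTIVE_SYNC (0x22) and of the fixed-vtotal operation mode (0x01), as well as the example vtotal, are assumptions here rather than taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vtotal = 1350;    /* example adjusted_mode->vtotal */
	uint16_t target_rr = 0;    /* unused in fixed-vtotal mode */
	uint8_t hb[4] = { 0x00, 0x22, 0x02, 0x09 };  /* HB0..HB3 as packed above */
	uint8_t db[5] = {
		0x01,                   /* DB0: operation mode (assumed value) */
		vtotal & 0xff,          /* DB1: vtotal bits 7:0 */
		(vtotal >> 8) & 0xff,   /* DB2: vtotal bits 15:8 */
		target_rr & 0xff,       /* DB3: target_rr bits 7:0 */
		(target_rr >> 8) & 0x3, /* DB4: target_rr bits 9:8 */
	};

	printf("HB: %02x %02x %02x %02x\n", hb[0], hb[1], hb[2], hb[3]);
	printf("DB: %02x %02x %02x %02x %02x\n",
	       db[0], db[1], db[2], db[3], db[4]);
	return 0; /* prints HB: 00 22 02 09 and DB: 01 46 05 00 00 */
}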
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
const struct hdmi_drm_infoframe *drm_infoframe,
@@ -4226,6 +4329,10 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
&crtc_state->infoframes.drm.drm,
&sdp, sizeof(sdp));
break;
+ case DP_SDP_ADAPTIVE_SYNC:
+ len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp,
+ sizeof(sdp));
+ break;
default:
MISSING_CASE(type);
return;
@@ -4247,6 +4354,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
+
+ if (HAS_AS_SDP(dev_priv))
+ dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;
+
u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
@@ -4264,10 +4375,42 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
return;
intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+ intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC);
intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
+static
+int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp,
+ const void *buffer, size_t size)
+{
+ const struct dp_sdp *sdp = buffer;
+
+ if (size < sizeof(struct dp_sdp))
+ return -EINVAL;
+
+ memset(as_sdp, 0, sizeof(*as_sdp));
+
+ if (sdp->sdp_header.HB0 != 0)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB2 != 0x02)
+ return -EINVAL;
+
+ if ((sdp->sdp_header.HB3 & 0x3F) != 9)
+ return -EINVAL;
+
+ as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH;
+ as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE;
+ as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1];
+ as_sdp->target_rr = (u64)sdp->db[3] | (((u64)sdp->db[4] & 0x3) << 8);
+
+ return 0;
+}
+
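A quick worked example of the 10-bit target_rr reassembly above (hypothetical payload bytes): with db[3] = 0x2c and db[4] = 0x01, target_rr = ((0x01 & 0x3) << 8) | 0x2c = 0x12c = 300, mirroring how the pack helper earlier split the value into bits 7:0 and 9:8.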
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
const void *buffer, size_t size)
{
@@ -4338,6 +4481,29 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
return 0;
}
+static void
+intel_read_dp_as_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_dp_as_sdp *as_sdp)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ unsigned int type = DP_SDP_ADAPTIVE_SYNC;
+ struct dp_sdp sdp = {};
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+ sizeof(sdp));
+
+ ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n");
+}
+
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
const void *buffer, size_t size)
@@ -4444,6 +4610,10 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
&crtc_state->infoframes.drm.drm);
break;
+ case DP_SDP_ADAPTIVE_SYNC:
+ intel_read_dp_as_sdp(encoder, crtc_state,
+ &crtc_state->infoframes.as_sdp);
+ break;
default:
MISSING_CASE(type);
break;
@@ -5371,6 +5541,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
+ intel_dp->mst_detect = intel_dp_mst_detect(intel_dp);
+
/* if there's no downstream port, we're done */
if (!drm_dp_is_branch(dpcd))
return connector_status_connected;
@@ -5382,7 +5554,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
connector_status_connected : connector_status_disconnected;
}
- if (intel_dp_can_mst(intel_dp))
+ if (intel_dp->mst_detect == DRM_DP_MST)
return connector_status_connected;
/* If no HPD, poke DDC gently */
@@ -5687,15 +5859,7 @@ intel_dp_detect(struct drm_connector *connector,
memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
intel_dp->psr.sink_panel_replay_support = false;
- if (intel_dp->is_mst) {
- drm_dbg_kms(&dev_priv->drm,
- "MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst,
- intel_dp->mst_mgr.mst_state);
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
- }
+ intel_dp_mst_disconnect(intel_dp);
intel_dp_tunnel_disconnect(intel_dp);
@@ -5714,7 +5878,7 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
- intel_dp_configure_mst(intel_dp);
+ intel_dp_mst_configure(intel_dp);
/*
* TODO: Reset link params when switching to MST mode, until MST
@@ -6497,7 +6661,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
int type;
/* Initialize the work for modeset in case of link train failure */
@@ -6522,7 +6685,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
+ drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder));
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
@@ -6565,6 +6728,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index c540d3a73fe7..106ecfde36d9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -88,6 +88,7 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
int intel_dp_link_symbol_size(int rate);
int intel_dp_link_symbol_clock(int rate);
@@ -119,7 +120,8 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead);
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
int max_dprx_rate, int max_dprx_lanes);
-bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
+bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner);
+bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
@@ -149,6 +151,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
bool bigjoiner);
bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
int hdisplay, int clock);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 4f4a0e3b3114..b8a53bb174da 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -61,9 +61,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
u32 status;
int ret;
- ret = __intel_de_wait_for_register(i915, ch_ctl,
- DP_AUX_CH_CTL_SEND_BUSY, 0,
- 2, timeout_ms, &status);
+ ret = intel_de_wait_custom(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0,
+ 2, timeout_ms, &status);
if (ret == -ETIMEDOUT)
drm_err(&i915->drm,
@@ -143,9 +142,15 @@ static int intel_dp_aux_sync_len(void)
return precharge + preamble;
}
-static int intel_dp_aux_fw_sync_len(void)
+int intel_dp_aux_fw_sync_len(void)
{
- int precharge = 10; /* 10-16 */
+ /*
+ * We faced some glitches on the Dell Precision 5490 MTL laptop with
+ * panel "Manufacturer: AUO, Model: 63898" when using the HW default
+ * of 18. Using 20 fixes these problems and is still within the range
+ * mentioned in the eDP specification.
+ */
+ int precharge = 12; /* 10-16 */
int preamble = 8;
return precharge + preamble;
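For reference, the new sync length works out as precharge 12 + preamble 8 = 20 pulses, matching the "Using 20" rationale in the comment; the previous value gave 10 + 8 = 18, the HW default mentioned there.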
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 8447f3e601fe..76d1f2ed7c2f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -20,5 +20,6 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
+int intel_dp_aux_fw_sync_len(void);
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index b98a87883fef..92b03073acdd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -691,12 +691,15 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
u8 bcaps;
int ret;
+ *hdcp_capable = false;
+ *hdcp2_capable = false;
if (!intel_encoder_is_mst(connector->encoder))
return -EINVAL;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
- return ret;
+ drm_dbg_kms(&i915->drm,
+ "HDCP2 DPCD capability read failed err: %d\n", ret);
ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
if (ret)
@@ -766,11 +769,9 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
return -EINVAL;
/* Wait for encryption confirmation */
- if (intel_de_wait_for_register(i915,
- HDCP_STATUS(i915, cpu_transcoder, port),
- stream_enc_status,
- enable ? stream_enc_status : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait(i915, HDCP_STATUS(i915, cpu_transcoder, port),
+ stream_enc_status, enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
return -ETIMEDOUT;
@@ -801,11 +802,10 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return ret;
/* Wait for encryption confirmation */
- if (intel_de_wait_for_register(i915,
- HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
- STREAM_ENCRYPTION_STATUS,
- enable ? STREAM_ENCRYPTION_STATUS : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait(i915, HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index fb84ca98bb7a..947575140059 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -334,7 +334,7 @@ static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
- DISPLAY_VER(i915) >= 11;
+ DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915);
}
/* 128b/132b */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 53aec023ce92..c772ba19c547 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -51,25 +51,39 @@
#include "intel_vdsc.h"
#include "skl_scaler.h"
-static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp,
- const struct drm_display_mode *adjusted_mode,
- struct intel_crtc_state *crtc_state,
- bool dsc)
+static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
+ bool dsc)
{
- if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) < 14 && dsc) {
- int output_bpp = bpp;
- /* DisplayPort 2 128b/132b, bits per lane is always 32 */
- int symbol_clock = crtc_state->port_clock / 32;
-
- if (output_bpp * adjusted_mode->crtc_clock >=
- symbol_clock * 72) {
- drm_dbg_kms(&i915->drm, "UHBR check failed(required bw %d available %d)\n",
- output_bpp * adjusted_mode->crtc_clock, symbol_clock * 72);
- return -EINVAL;
- }
- }
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
- return 0;
+ if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(i915) >= 20 || !dsc)
+ return INT_MAX;
+
+ /*
+ * DSC->DPT interface width:
+ * ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used)
+ * LNL+: 144 bits (not a bottleneck in any config)
+ *
+ * Bspec/49259 suggests that the FEC overhead needs to be
+ * applied here, though HW people claim that neither this FEC
+ * nor any other overhead is applicable here (that is, the
+ * actual available_bw is just symbol_clock * 72). However,
+ * based on testing on MTL-P with the
+ * - DELL U3224KBA display
+ * - Unigraf UCD-500 CTS test sink
+ * devices, the
+ * - 5120x2880/995.59MHz
+ * - 6016x3384/1357.23MHz
+ * - 6144x3456/1413.39MHz
+ * modes (all the ones hitting the DPT limit on the above devices)
+ * require accounting for both the channel coding efficiency and
+ * an additional 3% overhead.
+ */
+ return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
+ drm_dp_bw_channel_coding_efficiency(true)),
+ mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}
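A rough worked example of the limit above, taking the ~96.71% 128b/132b channel coding efficiency as an assumption: on UHBR10 (port_clock = 1000000), the link symbol clock is 1000000 * 10 / 32 = 312500 kHz, so for the 6144x3456/1413.39MHz mode

	max_dpt_bpp ~= (312500 * 72 * 0.9671) / (1413390 * 1.03) ~= 14.9

i.e. on these platforms the compressed output bpp for that mode has to stay below roughly 15 bpp.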
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
@@ -88,11 +102,10 @@ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
if (dsc) {
flags |= DRM_DP_BW_OVERHEAD_DSC;
- /* TODO: add support for bigjoiner */
dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
adjusted_mode->clock,
adjusted_mode->hdisplay,
- false);
+ crtc_state->bigjoiner_pipes);
}
overhead = drm_dp_bw_overhead(crtc_state->lane_count,
@@ -158,6 +171,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp, slots = -EINVAL;
+ int max_dpt_bpp;
int ret = 0;
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
@@ -178,6 +192,13 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->port_clock,
crtc_state->lane_count);
+ max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
+ if (max_bpp > max_dpt_bpp) {
+ drm_dbg_kms(&i915->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
+ max_bpp, max_dpt_bpp);
+ max_bpp = max_dpt_bpp;
+ }
+
drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
min_bpp, max_bpp);
@@ -189,10 +210,6 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
- ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc);
- if (ret)
- continue;
-
link_bpp_x16 = to_bpp_x16(dsc ? bpp :
intel_dp_output_bpp(crtc_state->output_format, bpp));
@@ -404,15 +421,22 @@ static int mode_hblank_period_ns(const struct drm_display_mode *mode)
static bool
hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct link_config_limits *limits)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ bool is_uhbr_sink = connector->mst_port &&
+ drm_dp_128b132b_supported(connector->mst_port->dpcd);
+ int hblank_limit = is_uhbr_sink ? 500 : 300;
if (!connector->dp.dsc_hblank_expansion_quirk)
return false;
- if (mode_hblank_period_ns(adjusted_mode) > 300)
+ if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate))
+ return false;
+
+ if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
return false;
return true;
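For scale, a hypothetical mode with htotal - hdisplay = 80 pixels at a 533.25 MHz dot clock has an hblank period of 80 / 533250 kHz ~= 150 ns, below both the legacy 300 ns limit and the 500 ns UHBR limit, so such a mode would take the quirk path (subject to the other checks above).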
@@ -428,7 +452,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int min_bpp_x16 = limits->link.min_bpp_x16;
- if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state))
+ if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits))
return true;
if (!dsc) {
@@ -525,14 +549,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
- const struct intel_connector *connector =
+ struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
- bool dsc_needed;
+ bool dsc_needed, joiner_needs_dsc;
int ret = 0;
if (pipe_config->fec_enable &&
@@ -542,11 +567,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
+ if (intel_dp_need_bigjoiner(intel_dp, connector,
+ adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_clock))
+ pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
+
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- dsc_needed = intel_dp->force_dsc_en ||
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, pipe_config->bigjoiner_pipes);
+
+ dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_mst_compute_config_limits(intel_dp,
connector,
pipe_config,
@@ -566,8 +598,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
/* enable compression if the mode doesn't fit available BW */
if (dsc_needed) {
- drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, force=%s)\n",
- str_yes_no(ret),
+ drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_mst_dsc_source_support(pipe_config))
@@ -613,7 +645,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
@@ -954,6 +986,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
struct drm_dp_mst_atomic_payload *new_payload =
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_crtc *pipe_crtc;
bool last_mst_stream;
intel_dp->active_mst_links--;
@@ -962,7 +995,13 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
- intel_crtc_vblank_off(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_off(old_pipe_crtc_state);
+ }
intel_disable_transcoder(old_crtc_state);
@@ -980,12 +1019,18 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_transcoder_func(old_crtc_state);
- intel_dsc_disable(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
- if (DISPLAY_VER(dev_priv) >= 9)
- skl_scaler_disable(old_crtc_state);
- else
- ilk_pfit_disable(old_crtc_state);
+ intel_dsc_disable(old_pipe_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_scaler_disable(old_pipe_crtc_state);
+ else
+ ilk_pfit_disable(old_pipe_crtc_state);
+ }
/*
* Power down mst path before disabling the port, otherwise we end
@@ -1117,6 +1162,39 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
+static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ u32 clear = 0;
+ u32 set = 0;
+
+ if (!IS_ALDERLAKE_P(i915))
+ return;
+
+ if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER))
+ return;
+
+ /* Wa_14013163432:adlp */
+ if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
+ set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
+
+ /* Wa_14014143976:adlp */
+ if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) {
+ if (intel_dp_is_uhbr(crtc_state))
+ set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
+ else if (crtc_state->fec_enable)
+ clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
+
+ if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
+ set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder);
+ }
+
+ if (!clear && !set)
+ return;
+
+ intel_de_rmw(i915, CHICKEN_MISC_3, clear, set);
+}
+
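For example (workaround bits per the logic above, transcoder hypothetical): an ADL-P display step E0 stream with FEC enabled but not UHBR ends up with clear = DP_MST_SHORT_HBLANK_WA(trans) and set = DP_MST_FEC_BS_JITTER_WA(trans) | DP_MST_DPT_DPTP_ALIGN_WA(trans), applied in a single intel_de_rmw() on CHICKEN_MISC_3.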
static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -1131,6 +1209,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
bool first_mst_stream = intel_dp->active_mst_links == 1;
+ struct intel_crtc *pipe_crtc;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
@@ -1145,6 +1224,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
+ enable_bs_jitter_was(pipe_config);
+
intel_ddi_enable_transcoder_func(encoder, pipe_config);
clear_act_sent(encoder, pipe_config);
@@ -1172,7 +1253,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_enable_transcoder(pipe_config);
- intel_crtc_vblank_on(pipe_config);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(pipe_config)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_on(pipe_crtc_state);
+ }
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
@@ -1285,7 +1372,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_port *port = intel_connector->port;
const int min_bpp = 18;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
int ret;
bool dsc = false, bigjoiner = false;
@@ -1302,8 +1389,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (*status != MODE_OK)
return 0;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
- *status = MODE_NO_DBLESCAN;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ *status = MODE_H_ILLEGAL;
+ return 0;
+ }
+
+ if (mode->clock < 10000) {
+ *status = MODE_CLOCK_LOW;
return 0;
}
@@ -1314,10 +1406,6 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, min_bpp);
- ret = drm_modeset_lock(&mgr->base.lock, ctx);
- if (ret)
- return ret;
-
/*
* TODO:
* - Also check if compression would allow for the mode
@@ -1330,32 +1418,23 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
* corresponding link capabilities of the sink) in case the
* stream is uncompressed for it by the last branch device.
*/
- if (mode_rate > max_rate || mode->clock > max_dotclk ||
- drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
- *status = MODE_CLOCK_HIGH;
- return 0;
- }
-
- if (mode->clock < 10000) {
- *status = MODE_CLOCK_LOW;
- return 0;
- }
-
- if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
- *status = MODE_H_ILLEGAL;
- return 0;
- }
-
- if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ if (intel_dp_need_bigjoiner(intel_dp, intel_connector,
+ mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
+ }
+
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ return ret;
- /* TODO: add support for bigjoiner */
+ if (mode_rate > max_rate || mode->clock > max_dotclk ||
+ drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
return 0;
}
- if (DISPLAY_VER(dev_priv) >= 10 &&
+ if (HAS_DSC_MST(dev_priv) &&
drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
/*
* TBD pass the connector BPC,
@@ -1383,11 +1462,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- /*
- * Big joiner configuration needs DSC for TGL which is not true for
- * XE_LPD where uncompressed joiner is supported.
- */
- if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) {
+ if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
@@ -1397,7 +1472,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- *status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
+ *status = intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
return 0;
}
@@ -1509,24 +1584,41 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
struct drm_dp_desc desc;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
- if (!connector->dp.dsc_decompression_aux)
+ if (!aux)
return false;
- if (drm_dp_read_desc(connector->dp.dsc_decompression_aux,
- &desc, true) < 0)
+ /*
+ * A logical port's OUI (at least for affected sinks) is all 0, so
+ * the parent port's OUI is used for identification instead.
+ */
+ if (drm_dp_mst_port_is_logical(connector->port)) {
+ aux = drm_dp_mst_aux_for_parent(connector->port);
+ if (!aux)
+ aux = &connector->mst_port->aux;
+ }
+
+ if (drm_dp_read_dpcd_caps(aux, dpcd) < 0)
return false;
- if (!drm_dp_has_quirk(&desc,
- DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
+ if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
return false;
- if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0)
+ if (!drm_dp_has_quirk(&desc,
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
return false;
- if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
+ /*
+ * UHBR (MST sink) devices requiring this quirk don't advertise
+ * HBLANK expansion support. Presumably they perform HBLANK
+ * expansion internally, or are affected by this issue on modes
+ * with a short HBLANK for other reasons.
+ */
+ if (!drm_dp_128b132b_supported(dpcd) &&
+ !(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
return false;
drm_dbg_kms(&i915->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 75d76f91ecbd..6503abdc2b98 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -348,7 +348,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
out_err:
drm_dbg_kms(&i915->drm,
- "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redect it (err %pe)\n",
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 4ca910874a4f..d20e4e9cf7f7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "bxt_dpio_phy_regs.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -29,6 +30,7 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
/**
@@ -123,9 +125,9 @@
*/
/**
- * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
+ * struct bxt_dpio_phy_info - Hold info for a broxton DDI phy
*/
-struct bxt_ddi_phy_info {
+struct bxt_dpio_phy_info {
/**
* @dual_channel: true if this phy has a second channel.
*/
@@ -161,7 +163,7 @@ struct bxt_ddi_phy_info {
} channel[2];
};
-static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+static const struct bxt_dpio_phy_info bxt_dpio_phy_info[] = {
[DPIO_PHY0] = {
.dual_channel = true,
.rcomp_phy = DPIO_PHY1,
@@ -183,7 +185,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
},
};
-static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
+static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
[DPIO_PHY0] = {
.dual_channel = false,
.rcomp_phy = DPIO_PHY1,
@@ -216,23 +218,23 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
},
};
-static const struct bxt_ddi_phy_info *
+static const struct bxt_dpio_phy_info *
bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
{
if (IS_GEMINILAKE(dev_priv)) {
- *count = ARRAY_SIZE(glk_ddi_phy_info);
- return glk_ddi_phy_info;
+ *count = ARRAY_SIZE(glk_dpio_phy_info);
+ return glk_dpio_phy_info;
} else {
- *count = ARRAY_SIZE(bxt_ddi_phy_info);
- return bxt_ddi_phy_info;
+ *count = ARRAY_SIZE(bxt_dpio_phy_info);
+ return bxt_dpio_phy_info;
}
}
-static const struct bxt_ddi_phy_info *
+static const struct bxt_dpio_phy_info *
bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
int count;
- const struct bxt_ddi_phy_info *phy_list =
+ const struct bxt_dpio_phy_info *phy_list =
bxt_get_phy_list(dev_priv, &count);
return &phy_list[phy];
@@ -241,7 +243,7 @@ bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch)
{
- const struct bxt_ddi_phy_info *phy_info, *phys;
+ const struct bxt_dpio_phy_info *phy_info, *phys;
int i, count;
phys = bxt_get_phy_list(dev_priv, &count);
@@ -269,16 +271,32 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
*ch = DPIO_CH0;
}
-void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+/*
+ * Like intel_de_rmw(), but reads from a single per-lane register and
+ * writes to the group register, applying the same value to all the lanes.
+ */
+static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915,
+ i915_reg_t reg_single,
+ i915_reg_t reg_group,
+ u32 clear, u32 set)
+{
+ u32 old, val;
+
+ old = intel_de_read(i915, reg_single);
+ val = (old & ~clear) | set;
+ intel_de_write(i915, reg_group, val);
+
+ return old;
+}
+
+void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int level = intel_ddi_level(encoder, crtc_state, 0);
const struct intel_ddi_buf_trans *trans;
enum dpio_channel ch;
enum dpio_phy phy;
- int n_entries;
- u32 val;
+ int lane, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
@@ -290,41 +308,51 @@ void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
- val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
- val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
- val |= trans->entries[level].bxt.margin << MARGIN_000_SHIFT |
- trans->entries[level].bxt.scale << UNIQ_TRANS_SCALE_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
- val &= ~SCALE_DCOMP_METHOD;
- if (trans->entries[level].bxt.enable)
- val |= SCALE_DCOMP_METHOD;
-
- if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- drm_err(&dev_priv->drm,
- "Disabled scaling while ouniqetrangenmethod was set");
-
- intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
- val &= ~DE_EMPHASIS;
- val |= trans->entries[level].bxt.deemphasis << DEEMPH_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
- val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+ bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch),
+ BXT_PORT_PCS_DW10_GRP(phy, ch),
+ TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0);
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ int level = intel_ddi_level(encoder, crtc_state, lane);
+
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW2_LN(phy, ch, lane),
+ MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK,
+ MARGIN_000(trans->entries[level].bxt.margin) |
+ UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale));
+ }
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ int level = intel_ddi_level(encoder, crtc_state, lane);
+ u32 val;
+
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane),
+ SCALE_DCOMP_METHOD,
+ trans->entries[level].bxt.enable ?
+ SCALE_DCOMP_METHOD : 0);
+
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane));
+ if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+ drm_err(&dev_priv->drm,
+ "Disabled scaling while ouniqetrangenmethod was set");
+ }
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ int level = intel_ddi_level(encoder, crtc_state, lane);
+
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW4_LN(phy, ch, lane),
+ DE_EMPHASIS_MASK,
+ DE_EMPHASIS(trans->entries[level].bxt.deemphasis));
+ }
+
+ bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch),
+ BXT_PORT_PCS_DW10_GRP(phy, ch),
+ 0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
}
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -353,7 +381,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
- return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+ return REG_FIELD_GET(GRC_CODE_MASK, val);
}
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
@@ -365,20 +393,20 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
phy);
}
-static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
- if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+ if (bxt_dpio_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy);
- if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+ if (bxt_dpio_phy_verify_state(dev_priv, phy)) {
drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
return;
@@ -399,20 +427,17 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
- if (intel_wait_for_register_fw(&dev_priv->uncore,
- BXT_PORT_CL1CM_DW0(phy),
- PHY_RESERVED | PHY_POWER_GOOD,
- PHY_POWER_GOOD,
- 1))
+ if (intel_de_wait_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+ PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
phy);
/* Program PLL Rcomp code offset */
- intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK,
- 0xE4 << IREF0RC_OFFSET_SHIFT);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4));
- intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK,
- 0xE4 << IREF1RC_OFFSET_SHIFT);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4));
/* Program power gating */
intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0,
@@ -435,9 +460,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
val = bxt_get_grc(dev_priv, phy_info->rcomp_phy);
dev_priv->display.state.bxt_phy_grc = val;
- grc_code = val << GRC_CODE_FAST_SHIFT |
- val << GRC_CODE_SLOW_SHIFT |
- val;
+ grc_code = GRC_CODE_FAST(val) |
+ GRC_CODE_SLOW(val) |
+ GRC_CODE_NOM(val);
intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy),
0, GRC_DIS | GRC_RDY_OVRD);
@@ -449,9 +474,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -460,9 +485,9 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
}
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info =
+ const struct bxt_dpio_phy_info *phy_info =
bxt_get_phy_info(dev_priv, phy);
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
@@ -471,19 +496,19 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
was_enabled = true;
if (rcomp_phy != -1)
- was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+ was_enabled = bxt_dpio_phy_is_enabled(dev_priv, rcomp_phy);
/*
* We need to copy the GRC calibration value from rcomp_phy,
* so make sure it's powered up.
*/
if (!was_enabled)
- _bxt_ddi_phy_init(dev_priv, rcomp_phy);
+ _bxt_dpio_phy_init(dev_priv, rcomp_phy);
- _bxt_ddi_phy_init(dev_priv, phy);
+ _bxt_dpio_phy_init(dev_priv, phy);
if (!was_enabled)
- bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
+ bxt_dpio_phy_uninit(dev_priv, rcomp_phy);
}
static bool __printf(6, 7)
@@ -513,10 +538,10 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
return false;
}
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
u32 mask;
bool ok;
@@ -526,23 +551,23 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
- if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+ if (!bxt_dpio_phy_is_enabled(dev_priv, phy))
return false;
ok = true;
/* PLL Rcomp code offset */
ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
- IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW9(%d)", phy);
+ IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xe4),
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
- IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW10(%d)", phy);
+ IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xe4),
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
/* Power gating */
mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
- "BXT_PORT_CL1CM_DW28(%d)", phy);
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
if (phy_info->dual_channel)
ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
@@ -552,9 +577,9 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
if (phy_info->rcomp_phy != -1) {
u32 grc_code = dev_priv->display.state.bxt_phy_grc;
- grc_code = grc_code << GRC_CODE_FAST_SHIFT |
- grc_code << GRC_CODE_SLOW_SHIFT |
- grc_code;
+ grc_code = GRC_CODE_FAST(grc_code) |
+ GRC_CODE_SLOW(grc_code) |
+ GRC_CODE_NOM(grc_code);
mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
GRC_CODE_NOM_MASK;
ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
@@ -562,7 +587,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
mask = GRC_DIS | GRC_RDY_OVRD;
ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
- "BXT_PORT_REF_DW8(%d)", phy);
+ "BXT_PORT_REF_DW8(%d)", phy);
}
return ok;
@@ -570,7 +595,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
}
u8
-bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
+bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
switch (lane_count) {
case 1:
@@ -586,8 +611,8 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
}
}
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask)
+void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -598,24 +623,18 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
- u32 val = intel_de_read(dev_priv,
- BXT_PORT_TX_DW14_LN(phy, ch, lane));
-
/*
* Note that on CHV this flag is called UPAR, but has
* the same function.
*/
- val &= ~LATENCY_OPTIM;
- if (lane_lat_optim_mask & BIT(lane))
- val |= LATENCY_OPTIM;
-
- intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
- val);
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
+ LATENCY_OPTIM,
+ lane_lat_optim_mask & BIT(lane) ? LATENCY_OPTIM : 0);
}
}
u8
-bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -701,9 +720,8 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
int i;
@@ -740,7 +758,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
for (i = 0; i < crtc_state->lane_count; i++) {
val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
- val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value);
vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
}
@@ -749,15 +767,15 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
- val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+ val |= DPIO_SWING_MARGIN000(margin_reg_value);
/*
* Supposedly this value shouldn't matter when unique transition
* scale is disabled, but in fact it does matter. Let's just
* always program the same value and hope it's OK.
*/
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+ val &= ~DPIO_UNIQ_TRANS_SCALE_MASK;
+ val |= DPIO_UNIQ_TRANS_SCALE(0x9a);
vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
}
@@ -796,9 +814,9 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
@@ -843,7 +861,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -866,39 +884,39 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
/* program left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
+ val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe == PIPE_B)
+ val |= DPIO_PCS_USEDCLKCHANNEL;
else
- val |= CHV_PCS_USEDCLKCHANNEL;
+ val &= ~DPIO_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
+ val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe == PIPE_B)
+ val |= DPIO_PCS_USEDCLKCHANNEL;
else
- val |= CHV_PCS_USEDCLKCHANNEL;
+ val &= ~DPIO_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
}
@@ -908,10 +926,10 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
- if (pipe != PIPE_B)
- val &= ~CHV_CMN_USEDCLKCHANNEL;
- else
+ if (pipe == PIPE_B)
val |= CHV_CMN_USEDCLKCHANNEL;
+ else
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
vlv_dpio_put(dev_priv);
@@ -923,9 +941,8 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
int data, i, stagger;
u32 val;
@@ -946,11 +963,10 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
for (i = 0; i < crtc_state->lane_count; i++) {
/* Set the upar bit */
if (crtc_state->lane_count == 1)
- data = 0x0;
+ data = 0;
else
- data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i),
- data << DPIO_UPAR_SHIFT);
+ data = (i == 1) ? 0 : DPIO_UPAR;
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data);
}
/* Data lane stagger programming */
@@ -1012,21 +1028,21 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
- enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
}
vlv_dpio_put(dev_priv);
@@ -1050,24 +1066,23 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port),
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
if (tx3_demph)
- vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
vlv_dpio_put(dev_priv);
}
@@ -1077,26 +1092,25 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
/* Program Tx lane resets to default */
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port),
- DPIO_PCS_TX_LANE2_RESET |
- DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port),
- DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
- DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
- (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
- DPIO_PCS_CLK_SOFT_RESET);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ DPIO_PCS_CLK_DATAWIDTH_8_10 |
+ DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW14(port), 0x40400000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
vlv_dpio_put(dev_priv);
}
@@ -1108,26 +1122,23 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
enum pipe pipe = crtc->pipe;
- enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* Enable clock channels for this port */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val);
+ val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe == PIPE_B)
+ val |= DPIO_PCS_USEDCLKCHANNEL;
+ val |= 0xc4;
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val);
/* Program lane clock */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
vlv_dpio_put(dev_priv);
}
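
Comparing the two sides of the hunk above pins down the named bits that replace the magic numbers: the old code unconditionally OR'ed in 0x001000c4 while the new code starts from DPIO_PCS_USEDCLKCHANNEL_OVRRIDE and only adds 0xc4, so the override must be 0x00100000 (bit 20), and the old pipe-dependent (1<<21) is what became DPIO_PCS_USEDCLKCHANNEL. A sketch of the definitions this arithmetic implies; the authoritative ones live in vlv_dpio_phy_regs.h:

	#define DPIO_PCS_USEDCLKCHANNEL_OVRRIDE	(1u << 20)	/* derived: 0x001000c4 - 0xc4 */
	#define DPIO_PCS_USEDCLKCHANNEL		(1u << 21)	/* derived: the old (1 << 21) */
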
@@ -1137,12 +1148,11 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
vlv_dpio_put(dev_priv);
}
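
Throughout this file the DPIO register macros also change shape: the per-"port" forms become channel-based, with a _GRP (group/broadcast) variant for writes meant to hit every lane of a channel and a (ch, lane) form for a single lane; hence VLV_TX_DW5(port) turning into VLV_TX_DW5_GRP(ch) and VLV_TX3_DW4(port) into VLV_TX_DW4(ch, 3) above. A rough sketch of what such macros look like, with invented offsets purely for illustration (the real layout lives in vlv_dpio_phy_regs.h):

	/* offsets below are made up for illustration only */
	#define VLV_TX_DW(ch, lane, dw)	(0x8000 + (ch) * 0x2000 + (lane) * 0x80 + (dw) * 4)
	#define VLV_TX_DW4(ch, lane)	VLV_TX_DW(ch, lane, 4)
	#define VLV_TX_DW4_GRP(ch)	(0xc000 + (ch) * 0x2000 + 4 * 4)	/* broadcast alias */

The PHY/channel pair is likewise now derived from the digital port (vlv_dig_port_to_phy()/vlv_dig_port_to_channel()) instead of from the pipe, which is what lets these helpers drop their crtc lookups.
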
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index 9adc4e8c1738..226994dcb89b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -29,18 +29,18 @@ enum dpio_phy {
#ifdef I915
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask);
-u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count);
+void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask);
+u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port);
enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port);
@@ -77,35 +77,35 @@ static inline void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, en
enum dpio_phy *phy, enum dpio_channel *ch)
{
}
-static inline void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static inline void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
}
-static inline void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static inline void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
}
-static inline void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static inline void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
}
-static inline bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static inline bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
return false;
}
-static inline bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static inline bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
return true;
}
-static inline u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
+static inline u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
return 0;
}
-static inline void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask)
+static inline void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask)
{
}
-static inline u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+static inline u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 3038655377ea..a981f45facb3 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -20,6 +20,7 @@
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
struct intel_dpll_funcs {
@@ -369,38 +370,68 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-static int i9xx_pll_refclk(struct drm_device *dev,
- const struct intel_crtc_state *pipe_config)
+static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev_priv))
+ if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return i915->display.vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(i915))
return 120000;
- else if (DISPLAY_VER(dev_priv) != 2)
+ else if (DISPLAY_VER(i915) != 2)
return 96000;
else
return 48000;
}
+void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
+ struct intel_dpll_hw_state *dpll_hw_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ u32 tmp;
+
+ /* No way to read it out on pipes B and C */
+ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
+ tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
+ else
+ tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
+
+ hw_state->dpll_md = tmp;
+ }
+
+ hw_state->dpll = intel_de_read(dev_priv, DPLL(crtc->pipe));
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+ hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
+ hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
+ } else {
+ /* Mask out read-only status bits. */
+ hw_state->dpll &= ~(DPLL_LOCK_VLV |
+ DPLL_PORTC_READY_MASK |
+ DPLL_PORTB_READY_MASK);
+ }
+}
+
/* Returns the clock of the currently programmed mode of the given pipe. */
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+ u32 dpll = hw_state->dpll;
u32 fp;
struct dpll clock;
int port_clock;
- int refclk = i9xx_pll_refclk(dev, pipe_config);
+ int refclk = i9xx_pll_refclk(crtc_state);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = pipe_config->dpll_hw_state.fp0;
+ fp = hw_state->fp0;
else
- fp = pipe_config->dpll_hw_state.fp1;
+ fp = hw_state->fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev_priv)) {
@@ -475,68 +506,69 @@ void i9xx_crtc_clock_get(struct intel_crtc *crtc,
* port_clock to compute adjusted_mode.crtc_clock in the
* encoder's get_config() function.
*/
- pipe_config->port_clock = port_clock;
+ crtc_state->port_clock = port_clock;
}
-void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
- struct dpll clock;
- u32 mdiv;
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
int refclk = 100000;
+ struct dpll clock;
+ u32 tmp;
/* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
vlv_dpio_get(dev_priv);
- mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe));
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
vlv_dpio_put(dev_priv);
- clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
- clock.m2 = mdiv & DPIO_M2DIV_MASK;
- clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
- clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
- clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+ clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
+ clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
+ clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
+ clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
+ clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);
- pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
+ crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
-void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
/* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
vlv_dpio_get(dev_priv);
- cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port));
- pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port));
- pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port));
- pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port));
- pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
+ cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
+ pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
+ pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
+ pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
+ pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
vlv_dpio_put(dev_priv);
- clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = (pll_dw0 & 0xff) << 22;
+ clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
- clock.m2 |= pll_dw2 & 0x3fffff;
- clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
- clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
- clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+ clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
+ clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
+ clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
+ clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);
- pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
+ crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
}
/*
@@ -958,37 +990,20 @@ static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
return (1 << dpll->n) << 16 | dpll->m2;
}
-static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2;
-
- if (IS_PINEVIEW(dev_priv)) {
- fp = pnv_dpll_compute_fp(clock);
- fp2 = pnv_dpll_compute_fp(reduced_clock);
- } else {
- fp = i9xx_dpll_compute_fp(clock);
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- }
-
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
+ return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
+ dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
@@ -1047,27 +1062,40 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-
- if (DISPLAY_VER(dev_priv) >= 4) {
- u32 dpll_md = (crtc_state->pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc_state->dpll_hw_state.dpll_md = dpll_md;
- }
+ return dpll;
}
-static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
+static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll;
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
+ if (IS_PINEVIEW(dev_priv)) {
+ hw_state->fp0 = pnv_dpll_compute_fp(clock);
+ hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ hw_state->fp0 = i9xx_dpll_compute_fp(clock);
+ hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ hw_state->dpll_md = i965_dpll_md(crtc_state);
+}
- dpll = DPLL_VGA_MODE_DIS;
+static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+
+ dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -1104,8 +1132,19 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
+ return dpll;
+}
+
+static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+
+ hw_state->fp0 = i9xx_dpll_compute_fp(clock);
+ hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
+
+ hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
}
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
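
Note that every access in the rewritten compute helpers goes through a platform-specific member of dpll_hw_state (.i9xx here, .hsw/.skl/.cx0pll elsewhere in the series). A simplified, self-contained sketch of the container this implies; the exact layout is an assumption and only the member names follow the diff:

	#include <stdint.h>
	typedef uint32_t u32;

	struct i9xx_dpll_hw_state {
		u32 dpll;
		u32 dpll_md;
		u32 fp0;
		u32 fp1;
	};

	struct hsw_dpll_hw_state { u32 wrpll; u32 spll; };
	struct skl_dpll_hw_state { u32 ctrl1; u32 cfgcr1; u32 cfgcr2; };

	struct intel_dpll_hw_state {
		union {
			struct i9xx_dpll_hw_state i9xx;
			struct hsw_dpll_hw_state hsw;
			struct skl_dpll_hw_state skl;
			/* ... one member per PLL flavour ... */
		};
	};

Keeping raw register values in per-platform structs lets compute, enable, get_hw_state, dump and compare all agree on a single definition of the state.
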
@@ -1185,62 +1224,54 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return ret;
/* TODO: Do the readback via intel_compute_shared_dplls() */
- crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
return 0;
}
+static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ ((intel_panel_use_ssc(i915) && i915->display.vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
+ return 25;
+
+ if (crtc_state->sdvo_tv_clock)
+ return 20;
+
+ return 21;
+}
+
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
return dpll->m < factor * dpll->n;
}
-static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2;
- int factor;
-
- /* Enable autotuning of the PLL clock (if permissible) */
- factor = 21;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->display.vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) &&
- intel_is_dual_link_lvds(dev_priv)))
- factor = 25;
- } else if (crtc_state->sdvo_tv_clock) {
- factor = 20;
- }
+ u32 fp;
fp = i9xx_dpll_compute_fp(clock);
if (ilk_needs_fb_cb_tune(clock, factor))
fp |= FP_CB_TUNE;
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- if (ilk_needs_fb_cb_tune(reduced_clock, factor))
- fp2 |= FP_CB_TUNE;
-
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
+ return fp;
}
-static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- ilk_update_pll_dividers(crtc_state, clock, reduced_clock);
-
- dpll = 0;
+ dpll = DPLL_VCO_ENABLE;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
@@ -1302,9 +1333,20 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- dpll |= DPLL_VCO_ENABLE;
+ return dpll;
+}
+
+static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+ int factor = ilk_fb_cb_factor(crtc_state);
+
+ hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
+ hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
- crtc_state->dpll_hw_state.dpll = dpll;
+ hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
}
static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1377,39 +1419,56 @@ static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
return intel_reserve_shared_dplls(state, crtc, NULL);
}
-void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
+static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 dpll;
- crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ dpll = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+
if (crtc->pipe != PIPE_A)
- crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
- DPLL_EXT_BUFFER_ENABLE_VLV;
+ dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
- crtc_state->dpll_hw_state.dpll_md =
- (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ return dpll;
}
-void chv_compute_dpll(struct intel_crtc_state *crtc_state)
+void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+
+ hw_state->dpll = vlv_dpll(crtc_state);
+ hw_state->dpll_md = i965_dpll_md(crtc_state);
+}
+
+static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 dpll;
- crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ dpll = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+
if (crtc->pipe != PIPE_A)
- crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll_md =
- (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ return dpll;
+}
+
+void chv_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+
+ hw_state->dpll = chv_dpll(crtc_state);
+ hw_state->dpll_md = i965_dpll_md(crtc_state);
}
static int chv_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1765,7 +1824,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll = crtc_state->dpll_hw_state.dpll;
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
int i;
@@ -1775,157 +1834,152 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
if (i9xx_has_pps(dev_priv))
assert_pps_unlocked(dev_priv, pipe);
- intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0);
- intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1);
+ intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
+ intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
/* Wait for the clocks to stabilize. */
intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
- intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150); /* wait for warmup */
}
}
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+ enum dpio_phy phy, enum dpio_channel ch)
{
- u32 reg_val;
+ u32 tmp;
/*
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0x8c000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp &= 0xffffff00;
+ tmp |= 0x00000030;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp &= 0x00ffffff;
+ tmp |= 0x8c000000;
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
+
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp &= 0xffffff00;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp &= 0x00ffffff;
+ tmp |= 0xb0000000;
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
}
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct dpll *clock = &crtc_state->dpll;
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
- u32 mdiv;
- u32 bestn, bestm1, bestm2, bestp1, bestp2;
- u32 coreclk, reg_val;
+ u32 tmp, coreclk;
vlv_dpio_get(dev_priv);
- bestn = crtc_state->dpll.n;
- bestm1 = crtc_state->dpll.m1;
- bestm2 = crtc_state->dpll.m2;
- bestp1 = crtc_state->dpll.p1;
- bestp2 = crtc_state->dpll.p2;
-
/* See eDP HDMI DPIO driver vbios notes doc */
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, phy);
+ vlv_pllb_recal_opamp(dev_priv, phy, ch);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe));
- reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val);
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
+ tmp &= 0x00ffffff;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
/* Disable fast lock */
vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
- mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
- mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
- mdiv |= ((bestn << DPIO_N_SHIFT));
- mdiv |= (1 << DPIO_K_SHIFT);
+ tmp = DPIO_M1_DIV(clock->m1) |
+ DPIO_M2_DIV(clock->m2) |
+ DPIO_P1_DIV(clock->p1) |
+ DPIO_P2_DIV(clock->p2) |
+ DPIO_N_DIV(clock->n) |
+ DPIO_K_DIV(1);
/*
* Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
* but we don't support that).
* Note: don't use the DAC post divider as it seems unstable.
*/
- mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
+ tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
- mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
+ tmp |= DPIO_ENABLE_CALIBRATION;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
/* Set HBR and RBR LPF coefficients */
if (crtc_state->port_clock == 162000 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
0x009f0003);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
0x00d0000f);
if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df40000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df70000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe));
+ coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
vlv_dpio_put(dev_priv);
}
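
The divider programming above swaps open-coded shifts (bestm1 << DPIO_M1DIV_SHIFT, ...) for mask-derived field constructors in the DPIO_M1_DIV() style. i915 builds these on REG_FIELD_PREP()-type helpers; a sketch of the mechanics with an invented field layout, so treat the bit positions as placeholders:

	#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(mask, val)	(((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

	#define DPIO_M1_DIV_MASK	GENMASK(30, 28)	/* placeholder bits */
	#define DPIO_M1_DIV(m1)		FIELD_PREP(DPIO_M1_DIV_MASK, (m1))

Because the constructor both shifts and masks, an out-of-range divider can no longer silently spill into a neighbouring field, which the shift-only form allowed.
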
@@ -1934,9 +1988,10 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
@@ -1948,6 +2003,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
@@ -1957,16 +2013,14 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
/* Enable Refclk */
intel_de_write(dev_priv, DPLL(pipe),
- crtc_state->dpll_hw_state.dpll &
- ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+ hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
- if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ if (hw_state->dpll & DPLL_VCO_ENABLE) {
vlv_prepare_pll(crtc_state);
_vlv_enable_pll(crtc_state);
}
- intel_de_write(dev_priv, DPLL_MD(pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md);
intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
@@ -1974,93 +2028,87 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ const struct dpll *clock = &crtc_state->dpll;
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
- u32 loopfilter, tribuf_calcntr;
- u32 bestm2, bestp1, bestp2, bestm2_frac;
- u32 dpio_val;
- int vco;
-
- bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
- bestm2 = crtc_state->dpll.m2 >> 22;
- bestp1 = crtc_state->dpll.p1;
- bestp2 = crtc_state->dpll.p2;
- vco = crtc_state->dpll.vco;
- dpio_val = 0;
- loopfilter = 0;
+ u32 tmp, loopfilter, tribuf_calcntr;
+ u32 m2_frac;
+
+ m2_frac = clock->m2 & 0x3fffff;
vlv_dpio_get(dev_priv);
/* p1 and p2 divider */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port),
- 5 << DPIO_CHV_S1_DIV_SHIFT |
- bestp1 << DPIO_CHV_P1_DIV_SHIFT |
- bestp2 << DPIO_CHV_P2_DIV_SHIFT |
- 1 << DPIO_CHV_K_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
+ DPIO_CHV_S1_DIV(5) |
+ DPIO_CHV_P1_DIV(clock->p1) |
+ DPIO_CHV_P2_DIV(clock->p2) |
+ DPIO_CHV_K_DIV(1));
/* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
+ DPIO_CHV_M2_DIV(clock->m2 >> 22));
/* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port),
- DPIO_CHV_M1_DIV_BY_2 |
- 1 << DPIO_CHV_N_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
+ DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
+ DPIO_CHV_N_DIV(1));
/* M2 fraction division */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
+ DPIO_CHV_M2_FRAC_DIV(m2_frac));
/* M2 fraction division enable */
- dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
- dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
- dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
- if (bestm2_frac)
- dpio_val |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
+ tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
+ if (m2_frac)
+ tmp |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
/* Program digital lock detect threshold */
- dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port));
- dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
- DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
- dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
- if (!bestm2_frac)
- dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
+ tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
+ if (!m2_frac)
+ tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
/* Loop filter */
- if (vco == 5400000) {
- loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ if (clock->vco == 5400000) {
+ loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
+ DPIO_CHV_INT_COEFF(0x8) |
+ DPIO_CHV_GAIN_CTRL(0x1);
tribuf_calcntr = 0x9;
- } else if (vco <= 6200000) {
- loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ } else if (clock->vco <= 6200000) {
+ loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
+ DPIO_CHV_INT_COEFF(0xB) |
+ DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0x9;
- } else if (vco <= 6480000) {
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ } else if (clock->vco <= 6480000) {
+ loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
+ DPIO_CHV_INT_COEFF(0x9) |
+ DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0x8;
} else {
/* Not supported. Apply the same limits as in the max case */
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
+ DPIO_CHV_INT_COEFF(0x9) |
+ DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0;
}
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
- dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port));
- dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
- dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
+ tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
/* AFC Recal */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port),
- vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) |
- DPIO_AFC_RECAL);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
+ vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
+ DPIO_AFC_RECAL);
vlv_dpio_put(dev_priv);
}
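
For reference, the VCO-dependent loop-filter selection above, restated as a lookup-table sketch. This illustrates the mapping only, not the driver's structure; note the first branch in the real code matches vco == 5400000 exactly rather than as an upper bound:

	#include <limits.h>

	/* values transcribed from the branches above; vco in kHz */
	static const struct {
		int vco_max;	/* inclusive upper bound; last row is the fallback */
		unsigned int prop, integ, gain, tribuf_calcntr;
	} chv_loopfilter[] = {
		{ 5400000, 0x3, 0x8, 0x1, 0x9 },
		{ 6200000, 0x5, 0xB, 0x3, 0x9 },
		{ 6480000, 0x4, 0x9, 0x3, 0x8 },
		{ INT_MAX, 0x4, 0x9, 0x3, 0x0 },	/* not supported: max-case limits */
	};
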
@@ -2069,17 +2117,18 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum pipe pipe = crtc->pipe;
u32 tmp;
vlv_dpio_get(dev_priv);
/* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
vlv_dpio_put(dev_priv);
@@ -2089,7 +2138,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
udelay(1);
/* Enable PLL */
- intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
/* Check PLL is locked */
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
@@ -2100,6 +2149,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
@@ -2109,9 +2159,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
/* Enable Refclk and SSC */
intel_de_write(dev_priv, DPLL(pipe),
- crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+ hw_state->dpll & ~DPLL_VCO_ENABLE);
- if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ if (hw_state->dpll & DPLL_VCO_ENABLE) {
chv_prepare_pll(crtc_state);
_chv_enable_pll(crtc_state);
}
@@ -2124,10 +2174,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* the value from DPLLBMD to either pipe B or C.
*/
intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(PIPE_B),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B), hw_state->dpll_md);
intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
+ dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
@@ -2137,8 +2186,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
(intel_de_read(dev_priv, DPLL(PIPE_B)) &
DPLL_VGA_MODE_DIS) == 0);
} else {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md);
intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
}
@@ -2199,7 +2247,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2217,9 +2265,9 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_dpio_get(dev_priv);
/* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
vlv_dpio_put(dev_priv);
}
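
The read-back paths earlier in this file (vlv_crtc_clock_get(), chv_crtc_clock_get()) received the mirror-image conversion: open-coded "(reg >> SHIFT) & mask" decodes became REG_FIELD_GET(MASK, reg). A self-contained sketch of that mechanism, again with a placeholder layout:

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_GET(mask, val)	(((val) & (mask)) >> __builtin_ctz(mask))

	#define DPIO_P1_DIV_MASK	GENMASK(23, 21)	/* placeholder bits */

	int main(void)
	{
		uint32_t tmp = 0x00a00000;	/* pretend DPIO readout: bits 23 and 21 set */

		printf("p1 = %u\n", FIELD_GET(DPIO_P1_DIV_MASK, tmp));	/* prints 5 */
		return 0;
	}

Deriving the shift from the mask keeps one definition per field, so the encode side (the FIELD_PREP-style constructors) and the decode side can never disagree about a bit position.
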
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index ac01bb19cc6c..a86a79408af0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -13,6 +13,7 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dpll_hw_state;
enum pipe;
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
@@ -22,6 +23,8 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
+void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
+ struct intel_dpll_hw_state *dpll_hw_state);
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
void chv_compute_dpll(struct intel_crtc_state *crtc_state);
@@ -39,12 +42,9 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state);
+void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state);
+void chv_crtc_clock_get(struct intel_crtc_state *crtc_state);
void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index ff480f171f75..90998b037349 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -24,6 +24,7 @@
#include <linux/math.h>
#include <linux/string_helpers.h>
+#include "bxt_dpio_phy_regs.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -64,7 +65,8 @@ struct intel_shared_dpll_funcs {
* the pll is not already enabled.
*/
void (*enable)(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll);
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state);
/*
* Hook for disabling the pll, called from intel_disable_shared_dpll()
@@ -81,7 +83,7 @@ struct intel_shared_dpll_funcs {
*/
bool (*get_hw_state)(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state);
+ struct intel_dpll_hw_state *dpll_hw_state);
/*
* Hook for calculating the pll's output frequency based on its passed
@@ -89,7 +91,7 @@ struct intel_shared_dpll_funcs {
*/
int (*get_freq)(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state);
+ const struct intel_dpll_hw_state *dpll_hw_state);
};
struct intel_dpll_mgr {
@@ -107,8 +109,8 @@ struct intel_dpll_mgr {
struct intel_crtc *crtc,
struct intel_encoder *encoder);
void (*update_ref_clks)(struct drm_i915_private *i915);
- void (*dump_hw_state)(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state);
+ void (*dump_hw_state)(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state);
bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
};
@@ -227,7 +229,7 @@ static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
if (pll->info->power_domain)
pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
- pll->info->funcs->enable(i915, pll);
+ pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
pll->on = true;
}
@@ -352,7 +354,7 @@ intel_dpll_mask_all(struct drm_i915_private *i915)
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
- const struct intel_dpll_hw_state *pll_state,
+ const struct intel_dpll_hw_state *dpll_hw_state,
unsigned long dpll_mask)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
@@ -379,9 +381,9 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
continue;
}
- if (memcmp(pll_state,
+ if (memcmp(dpll_hw_state,
&shared_dpll[pll->index].hw_state,
- sizeof(*pll_state)) == 0) {
+ sizeof(*dpll_hw_state)) == 0) {
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
crtc->base.base.id, crtc->base.name,
@@ -430,14 +432,14 @@ static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
struct intel_shared_dpll_state *shared_dpll;
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
if (shared_dpll[pll->index].pipe_mask == 0)
- shared_dpll[pll->index].hw_state = *pll_state;
+ shared_dpll[pll->index].hw_state = *dpll_hw_state;
intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
}
@@ -519,8 +521,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
u32 val;
@@ -553,17 +556,19 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
}
static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
const enum intel_dpll_id id = pll->info->id;
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(i915);
- intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
- intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);
+ intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
+ intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
- intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
/* Wait for the clocks to stabilize. */
intel_de_posting_read(i915, PCH_DPLL(id));
@@ -574,7 +579,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
*
* So write it again.
*/
- intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
intel_de_posting_read(i915, PCH_DPLL(id));
udelay(200);
}
@@ -634,21 +639,25 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
return 0;
}
-static void ibx_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void ibx_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm,
- "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
+
+ drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
}
-static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct i9xx_dpll_hw_state *a = &_a->i9xx;
+ const struct i9xx_dpll_hw_state *b = &_b->i9xx;
+
return a->dpll == b->dpll &&
a->dpll_md == b->dpll_md &&
a->fp0 == b->fp0 &&
@@ -677,19 +686,24 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
};
static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
const enum intel_dpll_id id = pll->info->id;
- intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
+ intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
intel_de_posting_read(i915, WRPLL_CTL(id));
udelay(20);
}
static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
+
+ intel_de_write(i915, SPLL_CTL, hw_state->spll);
intel_de_posting_read(i915, SPLL_CTL);
udelay(20);
}
@@ -728,8 +742,9 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
u32 val;
@@ -749,8 +764,9 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
intel_wakeref_t wakeref;
u32 val;
@@ -975,11 +991,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
int refclk;
int n, p, r;
- u32 wrpll = pll_state->wrpll;
+ u32 wrpll = hw_state->wrpll;
switch (wrpll & WRPLL_REF_MASK) {
case WRPLL_REF_SPECIAL_HSW:
@@ -1020,11 +1037,12 @@ hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
unsigned int p, n2, r2;
hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
- crtc_state->dpll_hw_state.wrpll =
+ hw_state->wrpll =
WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
@@ -1099,7 +1117,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
int link_clock = 0;
@@ -1127,11 +1145,12 @@ hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
return -EINVAL;
- crtc_state->dpll_hw_state.spll =
+ hw_state->spll =
SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
return 0;
@@ -1150,11 +1169,12 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
int link_clock = 0;
- switch (pll_state->spll & SPLL_FREQ_MASK) {
+ switch (hw_state->spll & SPLL_FREQ_MASK) {
case SPLL_FREQ_810MHz:
link_clock = 81000;
break;
@@ -1225,16 +1245,21 @@ static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
i915->display.dpll.ref_clks.nssc = 135000;
}
-static void hsw_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void hsw_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
- hw_state->wrpll, hw_state->spll);
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
+
+ drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ hw_state->wrpll, hw_state->spll);
}
-static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct hsw_dpll_hw_state *a = &_a->hsw;
+ const struct hsw_dpll_hw_state *b = &_b->hsw;
+
return a->wrpll == b->wrpll &&
a->spll == b->spll;
}
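
dump_hw_state() now takes a struct drm_printer instead of calling drm_dbg_kms() directly, so one dump routine can feed the debug log, debugfs, or any other sink the caller selects. The indirection, reduced to standalone C (this stand-in struct is an assumption-level sketch, not the actual DRM type):

	#include <stdarg.h>
	#include <stdio.h>

	struct printer {
		/* sink callback; a real drm_printer also carries destination state */
		void (*vprintf)(struct printer *p, const char *fmt, va_list va);
	};

	static void pr_printf(struct printer *p, const char *fmt, ...)
	{
		va_list va;

		va_start(va, fmt);
		p->vprintf(p, fmt, va);
		va_end(va);
	}

	static void sink_stderr(struct printer *p, const char *fmt, va_list va)
	{
		vfprintf(stderr, fmt, va);
	}

	static void dump_hw_state(struct printer *p, unsigned int wrpll, unsigned int spll)
	{
		pr_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", wrpll, spll);
	}

	int main(void)
	{
		struct printer p = { .vprintf = sink_stderr };

		dump_hw_state(&p, 0x40000000, 0x0);
		return 0;
	}
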
@@ -1254,7 +1279,8 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
};
static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *hw_state)
{
}
@@ -1265,7 +1291,7 @@ static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
return true;
}
@@ -1332,26 +1358,31 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
};
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct skl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
intel_de_rmw(i915, DPLL_CTRL1,
- DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
- pll->state.hw_state.ctrl1 << (id * 6));
+ DPLL_CTRL1_HDMI_MODE(id) |
+ DPLL_CTRL1_SSC(id) |
+ DPLL_CTRL1_LINK_RATE_MASK(id),
+ hw_state->ctrl1 << (id * 6));
intel_de_posting_read(i915, DPLL_CTRL1);
}
static void skl_ddi_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
- skl_ddi_pll_write_ctrl1(i915, pll);
+ skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
- intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
- intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+ intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
+ intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
intel_de_posting_read(i915, regs[id].cfgcr1);
intel_de_posting_read(i915, regs[id].cfgcr2);
@@ -1363,9 +1394,12 @@ static void skl_ddi_pll_enable(struct drm_i915_private *i915,
}
static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- skl_ddi_pll_write_ctrl1(i915, pll);
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
+
+ skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
}
static void skl_ddi_pll_disable(struct drm_i915_private *i915,
@@ -1386,13 +1420,14 @@ static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
- u32 val;
+ struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
bool ret;
+ u32 val;
wakeref = intel_display_power_get_if_enabled(i915,
POWER_DOMAIN_DISPLAY_CORE);
@@ -1423,8 +1458,9 @@ out:
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
@@ -1695,16 +1731,17 @@ skip_remaining_dividers:
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
int ref_clock = i915->display.dpll.ref_clks.nssc;
u32 p0, p1, p2, dco_freq;
- p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
- p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+ p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
- if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ if (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
p1 = 1;
@@ -1752,10 +1789,10 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
return 0;
}
- dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+ dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
ref_clock;
- dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+ dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
ref_clock / 0x8000;
if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
@@ -1767,37 +1804,35 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
struct skl_wrpll_params wrpll_params = {};
- u32 ctrl1, cfgcr1, cfgcr2;
int ret;
- /*
- * See comment in intel_dpll_hw_state to understand why we always use 0
- * as the DPLL id in this function.
- */
- ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
- ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
i915->display.dpll.ref_clks.nssc, &wrpll_params);
if (ret)
return ret;
- cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+ hw_state->ctrl1 =
+ DPLL_CTRL1_OVERRIDE(0) |
+ DPLL_CTRL1_HDMI_MODE(0);
+
+ hw_state->cfgcr1 =
+ DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
wrpll_params.dco_integer;
- cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ hw_state->cfgcr2 =
+ DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
&crtc_state->dpll_hw_state);
@@ -1807,6 +1842,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
+ struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
u32 ctrl1;
/*
@@ -1836,18 +1872,19 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
break;
}
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ hw_state->ctrl1 = ctrl1;
return 0;
}
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
int link_clock = 0;
- switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+ switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
case DPLL_CTRL1_LINK_RATE_810:
link_clock = 81000;
@@ -1921,16 +1958,18 @@ static int skl_get_dpll(struct intel_atomic_state *state,
static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
+
/*
* ctrl1 register is already shifted for each pll, just use 0 to get
* the internal shift for each field
*/
- if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
- return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
+ if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+ return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
else
- return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
+ return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
}
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
@@ -1939,19 +1978,21 @@ static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
-static void skl_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void skl_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm, "dpll_hw_state: "
- "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
- hw_state->ctrl1,
- hw_state->cfgcr1,
- hw_state->cfgcr2);
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
+
+ drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+ hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
}
-static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct skl_dpll_hw_state *a = &_a->skl;
+ const struct skl_dpll_hw_state *b = &_b->skl;
+
return a->ctrl1 == b->ctrl1 &&
a->cfgcr1 == b->cfgcr1 &&
a->cfgcr2 == b->cfgcr2;
@@ -1991,12 +2032,14 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
};
static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- u32 temp;
+ const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
enum dpio_phy phy;
enum dpio_channel ch;
+ u32 temp;
bxt_port_to_phy_channel(i915, port, &phy, &ch);
@@ -2019,43 +2062,43 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
/* Write P1 & P2 */
intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
- PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
+ PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
/* Write M2 integer */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
- PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
+ PORT_PLL_M2_INT_MASK, hw_state->pll0);
/* Write N */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
- PORT_PLL_N_MASK, pll->state.hw_state.pll1);
+ PORT_PLL_N_MASK, hw_state->pll1);
/* Write M2 fraction */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
- PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
+ PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
/* Write M2 fraction enable */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
- PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
+ PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
/* Write coeff */
temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
- temp |= pll->state.hw_state.pll6;
+ temp |= hw_state->pll6;
intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
- PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
+ PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
- PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
+ PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
- temp |= pll->state.hw_state.pll10;
+ temp |= hw_state->pll10;
intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
@@ -2063,7 +2106,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
temp |= PORT_PLL_RECALIBRATE;
intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- temp |= pll->state.hw_state.ebb4;
+ temp |= hw_state->ebb4;
intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
@@ -2075,7 +2118,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
drm_err(&i915->drm, "PLL %d not locked\n", port);
if (IS_GEMINILAKE(i915)) {
- temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
+ temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
temp |= DCC_DELAY_RANGE_2;
intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
}
@@ -2087,7 +2130,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
- temp |= pll->state.hw_state.pcsdw12;
+ temp |= hw_state->pcsdw12;
intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
@@ -2112,8 +2155,9 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
intel_wakeref_t wakeref;
enum dpio_phy phy;
@@ -2245,7 +2289,7 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
const struct dpll *clk_div)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
+ struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
int clock = crtc_state->port_clock;
int vco = clk_div->vco;
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
@@ -2283,45 +2327,47 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
else
lanestagger = 0x02;
- dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
- dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
- dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
- dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
+ hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+ hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
+ hw_state->pll1 = PORT_PLL_N(clk_div->n);
+ hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
if (clk_div->m2 & 0x3fffff)
- dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
+ hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
- dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
+ hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
PORT_PLL_INT_COEFF(int_coef) |
PORT_PLL_GAIN_CTL(gain_ctl);
- dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
+ hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
- dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
+ hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
- dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
+ hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
PORT_PLL_DCO_AMP_OVR_EN_H;
- dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+ hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
- dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
+ hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
return 0;
}
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
struct dpll clock;
clock.m1 = 2;
- clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
- clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
- clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
- clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+ clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
+ if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
+ hw_state->pll2);
+ clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
+ clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
+ clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
}
@@ -2402,28 +2448,26 @@ static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
/* DSI non-SSC ref 19.2MHz */
}
-static void bxt_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void bxt_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
- "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
- hw_state->ebb0,
- hw_state->ebb4,
- hw_state->pll0,
- hw_state->pll1,
- hw_state->pll2,
- hw_state->pll3,
- hw_state->pll6,
- hw_state->pll8,
- hw_state->pll9,
- hw_state->pll10,
- hw_state->pcsdw12);
+ const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
+
+ drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+ hw_state->ebb0, hw_state->ebb4,
+ hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
+ hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
+ hw_state->pcsdw12);
}
-static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct bxt_dpll_hw_state *a = &_a->bxt;
+ const struct bxt_dpll_hw_state *b = &_b->bxt;
+
return a->ebb0 == b->ebb0 &&
a->ebb4 == b->ebb4 &&
a->pll0 == b->pll0 &&
@@ -2554,7 +2598,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
static bool
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
- return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+ return ((IS_ELKHARTLAKE(i915) &&
IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->display.dpll.ref_clks.nssc == 38400;
@@ -2706,7 +2750,7 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
/*
* The PLL outputs multiple frequencies at the same time, selection is
@@ -2777,17 +2821,18 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
int ref_clock = icl_wrpll_ref_clock(i915);
u32 dco_fraction;
u32 p0, p1, p2, dco_freq;
- p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+ p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
- if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
else
p1 = 1;
@@ -2819,10 +2864,10 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
break;
}
- dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+ dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
ref_clock;
- dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT;
if (ehl_combo_pll_div_frac_wa_needed(i915))
@@ -2838,33 +2883,34 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
static void icl_calc_dpll_state(struct drm_i915_private *i915,
const struct skl_wrpll_params *pll_params,
- struct intel_dpll_hw_state *pll_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
u32 dco_fraction = pll_params->dco_fraction;
if (ehl_combo_pll_div_frac_wa_needed(i915))
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
- pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
+ hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
pll_params->dco_integer;
- pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
+ hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
DPLL_CFGCR1_KDIV(pll_params->kdiv) |
DPLL_CFGCR1_PDIV(pll_params->pdiv);
if (DISPLAY_VER(i915) >= 12)
- pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
+ hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
- pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
+ hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
if (i915->display.vbt.override_afc_startup)
- pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
+ hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
- struct intel_dpll_hw_state *state,
+ struct icl_dpll_hw_state *hw_state,
bool is_dkl)
{
static const u8 div1_vals[] = { 7, 5, 3, 2 };
@@ -2920,12 +2966,12 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
*target_dco_khz = dco;
- state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
+ hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
- state->mg_clktop2_coreclkctl1 =
+ hw_state->mg_clktop2_coreclkctl1 =
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
- state->mg_clktop2_hsclkctl =
+ hw_state->mg_clktop2_hsclkctl =
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
hsdiv |
@@ -2943,9 +2989,10 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
* adapted to integer-only calculation, that's why it looks so different.
*/
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
- struct intel_dpll_hw_state *pll_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
int refclk_khz = i915->display.dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
@@ -2960,7 +3007,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
int ret;
ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
- pll_state, is_dkl);
+ hw_state, is_dkl);
if (ret)
return ret;
@@ -3050,61 +3097,61 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
/* write pll_state calculations */
if (is_dkl) {
- pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
+ hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
if (i915->display.vbt.override_afc_startup) {
u8 val = i915->display.vbt.override_afc_startup_val;
- pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
+ hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
}
- pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
+ hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
- pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
+ hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
(use_ssc ? DKL_PLL_SSC_EN : 0);
- pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
+ hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
- pll_state->mg_pll_tdc_coldst_bias =
+ hw_state->mg_pll_tdc_coldst_bias =
DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
} else {
- pll_state->mg_pll_div0 =
+ hw_state->mg_pll_div0 =
(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
MG_PLL_DIV0_FBDIV_INT(m2div_int);
- pll_state->mg_pll_div1 =
+ hw_state->mg_pll_div1 =
MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
MG_PLL_DIV1_DITHER_DIV_2 |
MG_PLL_DIV1_NDIVRATIO(1) |
MG_PLL_DIV1_FBPREDIV(m1div);
- pll_state->mg_pll_lf =
+ hw_state->mg_pll_lf =
MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
MG_PLL_LF_AFCCNTSEL_512 |
MG_PLL_LF_GAINCTRL(1) |
MG_PLL_LF_INT_COEFF(int_coeff) |
MG_PLL_LF_PROP_COEFF(prop_coeff);
- pll_state->mg_pll_frac_lock =
+ hw_state->mg_pll_frac_lock =
MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
MG_PLL_FRAC_LOCK_DCODITHEREN |
MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
if (use_ssc || m2div_rem > 0)
- pll_state->mg_pll_frac_lock |=
+ hw_state->mg_pll_frac_lock |=
MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
- pll_state->mg_pll_ssc =
+ hw_state->mg_pll_ssc =
(use_ssc ? MG_PLL_SSC_EN : 0) |
MG_PLL_SSC_TYPE(2) |
MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
@@ -3112,14 +3159,14 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
MG_PLL_SSC_FLLEN |
MG_PLL_SSC_STEPSIZE(ssc_stepsize);
- pll_state->mg_pll_tdc_coldst_bias =
+ hw_state->mg_pll_tdc_coldst_bias =
MG_PLL_TDC_COLDST_COLDSTART |
MG_PLL_TDC_COLDST_IREFINT_EN |
MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
MG_PLL_TDC_TDCOVCCORR_EN |
MG_PLL_TDC_TDCSEL(3);
- pll_state->mg_pll_bias =
+ hw_state->mg_pll_bias =
MG_PLL_BIAS_BIAS_GB_SEL(3) |
MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
MG_PLL_BIAS_BIAS_BONUS(10) |
@@ -3129,17 +3176,17 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
MG_PLL_BIAS_IREFTRIM(iref_trim);
if (refclk_khz == 38400) {
- pll_state->mg_pll_tdc_coldst_bias_mask =
+ hw_state->mg_pll_tdc_coldst_bias_mask =
MG_PLL_TDC_COLDST_COLDSTART;
- pll_state->mg_pll_bias_mask = 0;
+ hw_state->mg_pll_bias_mask = 0;
} else {
- pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
- pll_state->mg_pll_bias_mask = -1U;
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
}
- pll_state->mg_pll_tdc_coldst_bias &=
- pll_state->mg_pll_tdc_coldst_bias_mask;
- pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ hw_state->mg_pll_tdc_coldst_bias &=
+ hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
}
return 0;
@@ -3147,31 +3194,32 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
ref_clock = i915->display.dpll.ref_clks.nssc;
if (DISPLAY_VER(i915) >= 12) {
- m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
- m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+ m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
- if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
- m2_frac = pll_state->mg_pll_bias &
+ if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = hw_state->mg_pll_bias &
DKL_PLL_BIAS_FBDIV_FRAC_MASK;
m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
} else {
m2_frac = 0;
}
} else {
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+ m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
- m2_frac = pll_state->mg_pll_div0 &
+ if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = hw_state->mg_pll_div0 &
MG_PLL_DIV0_FBDIV_FRAC_MASK;
m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
} else {
@@ -3179,7 +3227,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
}
}
- switch (pll_state->mg_clktop2_hsclkctl &
+ switch (hw_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
div1 = 2;
@@ -3194,11 +3242,11 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
div1 = 7;
break;
default:
- MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+ MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
return 0;
}
- div2 = (pll_state->mg_clktop2_hsclkctl &
+ div2 = (hw_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
@@ -3389,7 +3437,6 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
@@ -3408,8 +3455,7 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
- dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
- encoder->port));
+ dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
BIT(dpll_id));
@@ -3435,15 +3481,12 @@ static int icl_compute_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
return icl_compute_combo_phy_dpll(state, crtc);
- else if (intel_phy_is_tc(i915, phy))
+ else if (intel_encoder_is_tc(encoder))
return icl_compute_tc_phy_dplls(state, crtc);
- MISSING_CASE(phy);
+ MISSING_CASE(encoder->port);
return 0;
}
@@ -3452,15 +3495,12 @@ static int icl_get_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
return icl_get_combo_phy_dpll(state, crtc, encoder);
- else if (intel_phy_is_tc(i915, phy))
+ else if (intel_encoder_is_tc(encoder))
return icl_get_tc_phy_dplls(state, crtc, encoder);
- MISSING_CASE(phy);
+ MISSING_CASE(encoder->port);
return -EINVAL;
}
@@ -3493,8 +3533,9 @@ static void icl_put_dplls(struct intel_atomic_state *state,
static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
intel_wakeref_t wakeref;
@@ -3559,8 +3600,9 @@ out:
static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
intel_wakeref_t wakeref;
@@ -3630,9 +3672,10 @@ out:
static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state,
+ struct intel_dpll_hw_state *dpll_hw_state,
i915_reg_t enable_reg)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
bool ret = false;
@@ -3690,24 +3733,24 @@ out:
static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
- return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
+ return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
}
static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
- return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
+ return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
}
static void icl_dpll_write(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct icl_dpll_hw_state *hw_state)
{
- struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
const enum intel_dpll_id id = pll->info->id;
i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
@@ -3747,9 +3790,9 @@ static void icl_dpll_write(struct drm_i915_private *i915,
}
static void icl_mg_pll_write(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct icl_dpll_hw_state *hw_state)
{
- struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
/*
@@ -3790,9 +3833,9 @@ static void icl_mg_pll_write(struct drm_i915_private *i915,
}
static void dkl_pll_write(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct icl_dpll_hw_state *hw_state)
{
- struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
u32 val;
@@ -3905,13 +3948,15 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
}
static void combo_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
icl_pll_power_enable(i915, pll, enable_reg);
- icl_dpll_write(i915, pll);
+ icl_dpll_write(i915, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3927,11 +3972,14 @@ static void combo_pll_enable(struct drm_i915_private *i915,
}
static void tbt_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
+
icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
- icl_dpll_write(i915, pll);
+ icl_dpll_write(i915, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3945,16 +3993,18 @@ static void tbt_pll_enable(struct drm_i915_private *i915,
}
static void mg_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
icl_pll_power_enable(i915, pll, enable_reg);
if (DISPLAY_VER(i915) >= 12)
- dkl_pll_write(i915, pll);
+ dkl_pll_write(i915, pll, hw_state);
else
- icl_mg_pll_write(i915, pll);
+ icl_mg_pll_write(i915, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -4026,33 +4076,36 @@ static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
-static void icl_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void icl_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm,
- "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
- "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
- "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
- "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
- "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
- "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
- hw_state->cfgcr0, hw_state->cfgcr1,
- hw_state->div0,
- hw_state->mg_refclkin_ctl,
- hw_state->mg_clktop2_coreclkctl1,
- hw_state->mg_clktop2_hsclkctl,
- hw_state->mg_pll_div0,
- hw_state->mg_pll_div1,
- hw_state->mg_pll_lf,
- hw_state->mg_pll_frac_lock,
- hw_state->mg_pll_ssc,
- hw_state->mg_pll_bias,
- hw_state->mg_pll_tdc_coldst_bias);
-}
-
-static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
+
+ drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
+ "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
+ "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+ "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
+ "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+ "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+ hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
+ hw_state->mg_refclkin_ctl,
+ hw_state->mg_clktop2_coreclkctl1,
+ hw_state->mg_clktop2_hsclkctl,
+ hw_state->mg_pll_div0,
+ hw_state->mg_pll_div1,
+ hw_state->mg_pll_lf,
+ hw_state->mg_pll_frac_lock,
+ hw_state->mg_pll_ssc,
+ hw_state->mg_pll_bias,
+ hw_state->mg_pll_tdc_coldst_bias);
+}
+
+static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct icl_dpll_hw_state *a = &_a->icl;
+ const struct icl_dpll_hw_state *b = &_b->icl;
+
/* FIXME split combo vs. mg more thoroughly */
return a->cfgcr0 == b->cfgcr0 &&
a->cfgcr1 == b->cfgcr1 &&
@@ -4417,33 +4470,33 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
* intel_dpll_get_freq - calculate the DPLL's output frequency
* @i915: i915 device
* @pll: DPLL for which to calculate the output frequency
- * @pll_state: DPLL state from which to calculate the output frequency
+ * @dpll_hw_state: DPLL state from which to calculate the output frequency
*
- * Return the output frequency corresponding to @pll's passed in @pll_state.
+ * Return the output frequency corresponding to @pll's passed-in @dpll_hw_state.
*/
int intel_dpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
return 0;
- return pll->info->funcs->get_freq(i915, pll, pll_state);
+ return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
}
/**
* intel_dpll_get_hw_state - readout the DPLL's hardware state
* @i915: i915 device
* @pll: DPLL for which to calculate the output frequency
- * @hw_state: DPLL's hardware state
+ * @dpll_hw_state: DPLL's hardware state
*
- * Read out @pll's hardware state into @hw_state.
+ * Read out @pll's hardware state into @dpll_hw_state.
*/
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
- return pll->info->funcs->get_hw_state(i915, pll, hw_state);
+ return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
}
static void readout_dpll_hw_state(struct drm_i915_private *i915,
@@ -4514,22 +4567,24 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
}
/**
- * intel_dpll_dump_hw_state - write hw_state to dmesg
+ * intel_dpll_dump_hw_state - dump hw_state
* @i915: i915 drm device
- * @hw_state: hw state to be written to the log
+ * @p: where to print the state to
+ * @dpll_hw_state: hw state to be dumped
*
- * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
+ * Dump out the relevant values in @dpll_hw_state.
*/
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+ struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
if (i915->display.dpll.mgr) {
- i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
+ i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
*/
- ibx_dump_hw_state(i915, hw_state);
+ ibx_dump_hw_state(p, dpll_hw_state);
}
}
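
Converting the dump helpers from drm_dbg_kms() to a caller-supplied struct drm_printer decouples them from dmesg: the same helper can now feed debugfs, an error-capture buffer, or the debug log, depending on which printer the caller constructs (drm_print.h provides constructors such as drm_info_printer() and drm_seq_file_printer()). Here is a userspace sketch of the underlying idea, with a hand-rolled printer standing in for the real struct drm_printer.

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for struct drm_printer: a vprintf-style callback. */
struct printer {
	void (*printfn)(struct printer *p, const char *fmt, va_list args);
	void *arg;
};

static void pr_printf(struct printer *p, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	p->printfn(p, fmt, args);
	va_end(args);
}

static void stdio_printfn(struct printer *p, const char *fmt, va_list args)
{
	vfprintf(p->arg, fmt, args);
}

/* A dump helper written against the printer never names its sink. */
static void dump_state(struct printer *p, uint32_t wrpll, uint32_t spll)
{
	pr_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", wrpll, spll);
}

int main(void)
{
	struct printer p = { .printfn = stdio_printfn, .arg = stdout };

	dump_state(&p, 0x10, 0x0); /* the same helper could feed any sink */
	return 0;
}

This is also why the dump_hw_state() callbacks drop their drm_i915_private argument throughout this diff: once the sink travels in the printer, the device pointer is no longer needed.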
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index cc0e1386309d..f09e513ce05b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -36,6 +36,7 @@
enum tc_port;
struct drm_i915_private;
+struct drm_printer;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -180,18 +181,19 @@ enum icl_port_dpll_id {
ICL_PORT_DPLL_COUNT,
};
-struct intel_dpll_hw_state {
- /* i9xx, pch plls */
+struct i9xx_dpll_hw_state {
u32 dpll;
u32 dpll_md;
u32 fp0;
u32 fp1;
+};
- /* hsw, bdw */
+struct hsw_dpll_hw_state {
u32 wrpll;
u32 spll;
+};
- /* skl */
+struct skl_dpll_hw_state {
/*
* DPLL_CTRL1 has 6 bits for each DPLL. We store those in
* lower part of ctrl1 and they get shifted into position when writing
@@ -201,20 +203,18 @@ struct intel_dpll_hw_state {
u32 ctrl1;
/* HDMI only, 0 when used for DP */
u32 cfgcr1, cfgcr2;
+};
- /* icl */
- u32 cfgcr0;
+struct bxt_dpll_hw_state {
+ u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
+};
+
+struct icl_dpll_hw_state {
+ u32 cfgcr0, cfgcr1;
/* tgl */
u32 div0;
- /* bxt */
- u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
-
- /*
- * ICL uses the following, already defined:
- * u32 cfgcr0, cfgcr1;
- */
u32 mg_refclkin_ctl;
u32 mg_clktop2_coreclkctl1;
u32 mg_clktop2_hsclkctl;
@@ -229,6 +229,55 @@ struct intel_dpll_hw_state {
u32 mg_pll_tdc_coldst_bias_mask;
};
+struct intel_mpllb_state {
+ u32 clock; /* in KHz */
+ u32 ref_control;
+ u32 mpllb_cp;
+ u32 mpllb_div;
+ u32 mpllb_div2;
+ u32 mpllb_fracn1;
+ u32 mpllb_fracn2;
+ u32 mpllb_sscen;
+ u32 mpllb_sscstep;
+};
+
+struct intel_c10pll_state {
+ u32 clock; /* in KHz */
+ u8 tx;
+ u8 cmn;
+ u8 pll[20];
+};
+
+struct intel_c20pll_state {
+ u32 clock; /* in kHz */
+ u16 tx[3];
+ u16 cmn[4];
+ union {
+ u16 mplla[10];
+ u16 mpllb[11];
+ };
+};
+
+struct intel_cx0pll_state {
+ union {
+ struct intel_c10pll_state c10;
+ struct intel_c20pll_state c20;
+ };
+ bool ssc_enabled;
+};
+
+struct intel_dpll_hw_state {
+ union {
+ struct i9xx_dpll_hw_state i9xx;
+ struct hsw_dpll_hw_state hsw;
+ struct skl_dpll_hw_state skl;
+ struct bxt_dpll_hw_state bxt;
+ struct icl_dpll_hw_state icl;
+ struct intel_mpllb_state mpllb;
+ struct intel_cx0pll_state cx0pll;
+ };
+};
+
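+
With the platform variants folded into one union, bytes past the currently active member are effectively indeterminate, which is one plausible reason the per-field compare_hw_state() helpers are kept rather than replaced by a single memcmp() over the whole union. A userspace sketch of the hazard, again with hypothetical struct names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hsw { uint32_t wrpll, spll; };
struct icl { uint32_t cfgcr0, cfgcr1, div0; };

union state {
	struct hsw hsw;
	struct icl icl;
};

int main(void)
{
	union state a, b;

	/* Fill the unions with different garbage, then set the active view. */
	memset(&a, 0xaa, sizeof(a));
	memset(&b, 0xbb, sizeof(b));
	a.hsw = (struct hsw){ .wrpll = 1, .spll = 2 };
	b.hsw = (struct hsw){ .wrpll = 1, .spll = 2 };

	/* memcmp() still sees the stale bytes beyond the active member... */
	printf("memcmp equal: %d\n", memcmp(&a, &b, sizeof(a)) == 0); /* 0 */

	/* ...while a field-wise compare of the active view is reliable. */
	printf("fields equal: %d\n",
	       a.hsw.wrpll == b.hsw.wrpll && a.hsw.spll == b.hsw.spll); /* 1 */
	return 0;
}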
/**
* struct intel_shared_dpll_state - hold the DPLL atomic state
*
@@ -364,10 +413,10 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder);
int intel_dpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state);
+ const struct intel_dpll_hw_state *dpll_hw_state);
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state);
+ struct intel_dpll_hw_state *dpll_hw_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
@@ -377,7 +426,8 @@ void intel_dpll_readout_hw_state(struct drm_i915_private *i915);
void intel_dpll_sanitize_state(struct drm_i915_private *i915);
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state);
+ struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state);
bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 169ef38ff188..597f8bd6aa1a 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -63,6 +63,15 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
return str[drrs_type];
}
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ if (HAS_DOUBLE_BUFFERED_M_N(i915))
+ return true;
+
+ return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
+}
+
static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
enum drrs_refresh_rate refresh_rate)
@@ -312,9 +321,8 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
mutex_lock(&crtc->drrs.mutex);
seq_printf(m, "DRRS capable: %s\n",
- str_yes_no(crtc_state->has_drrs ||
- HAS_DOUBLE_BUFFERED_M_N(i915) ||
- intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
+ str_yes_no(intel_cpu_transcoder_has_drrs(i915,
+ crtc_state->cpu_transcoder)));
seq_printf(m, "DRRS enabled: %s\n",
str_yes_no(crtc_state->has_drrs));
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
index 8ef5f93a80ff..0982f95eab72 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.h
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -9,12 +9,15 @@
#include <linux/types.h>
enum drrs_type;
+enum transcoder;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_connector;
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder);
const char *intel_drrs_type_str(enum drrs_type drrs_type);
bool intel_drrs_is_active(struct intel_crtc *crtc);
void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index d62e050185e7..4baaa92ceaec 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -340,6 +340,18 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
}
+static u32 dsb_chicken(struct intel_crtc *crtc)
+{
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR)
+ return DSB_SKIP_WAITS_EN |
+ DSB_CTRL_WAIT_SAFE_WINDOW |
+ DSB_CTRL_NO_WAIT_VBLANK |
+ DSB_INST_WAIT_SAFE_WINDOW |
+ DSB_INST_NO_WAIT_VBLANK;
+ else
+ return DSB_SKIP_WAITS_EN;
+}
+
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
int dewake_scanline)
{
@@ -361,6 +373,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
ctrl | DSB_ENABLE);
+ intel_de_write_fw(dev_priv, DSB_CHICKEN(pipe, dsb->id),
+ dsb_chicken(crtc));
+
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index d3cf6a652221..bd5888ce4852 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -64,14 +64,11 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(intel_connector, mode);
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
drm_dbg_kms(&dev_priv->drm, "\n");
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
status = intel_panel_mode_valid(intel_connector, mode);
if (status != MODE_OK)
return status;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c076da75b066..1840f5b59229 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -223,7 +223,7 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
- int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
int target_clock = mode->clock;
enum drm_mode_status status;
@@ -231,9 +231,6 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
/* XXX: Validate clock range */
if (fixed_mode) {
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 3ea6470d6d92..86b443433e8b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1106,7 +1106,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
{
struct drm_i915_private *i915 = to_i915(fb->dev);
unsigned int height;
- u32 alignment;
+ u32 alignment, unused;
if (DISPLAY_VER(i915) >= 12 &&
!intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) &&
@@ -1128,8 +1128,8 @@ static int intel_fb_offset_to_xy(int *x, int *y,
height = ALIGN(height, intel_tile_height(fb, color_plane));
/* Catch potential overflows early */
- if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
- fb->offsets[color_plane])) {
+ if (check_add_overflow(mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane], &unused)) {
drm_dbg_kms(&i915->drm,
"Bad offset 0x%08x or pitch %d for color plane %d\n",
fb->offsets[color_plane], fb->pitches[color_plane],
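
The hunk above moves the overflow check from the driver-local add_overflows_t() to the generic check_add_overflow() from <linux/overflow.h>, which returns true when the sum wraps the destination type and stores the (wrapped) result through its third argument — hence the new `unused` local. A userspace sketch of the semantics via the compiler builtin that the kernel macro wraps (the values here are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch = 0x10000, height = 0x11000, offset = 0x100;
	uint32_t unused;

	/*
	 * Like check_add_overflow(): returns true if the sum does not fit
	 * the destination type, writing the wrapped result through the
	 * pointer. The u64 product mirrors mul_u32_u32() in the hunk.
	 */
	bool bad = __builtin_add_overflow((uint64_t)pitch * height, offset,
					  &unused);

	printf("overflows u32: %s\n", bad ? "yes" : "no"); /* yes */
	return 0;
}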
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 7b42aef37d2f..b6df9baf481b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -255,6 +255,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return PTR_ERR(vma);
plane_state->ggtt_vma = vma;
+
+ /*
+ * Pre-populate the dma address before we enter the vblank
+ * evade critical section as i915_gem_object_get_dma_address()
+ * will trigger might_sleep() even if it won't actually sleep,
+ * which is the case when the fb has already been pinned.
+ */
+ if (phys_cursor)
+ plane_state->phys_dma_addr =
+ i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
} else {
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b453fcbd67da..151dcd0c45b6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -54,6 +54,7 @@
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
+#include "intel_fbc_regs.h"
#include "intel_frontbuffer.h"
#define for_each_fbc_id(__dev_priv, __fbc_id) \
@@ -826,10 +827,36 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) {
+ /*
+ * WaFbcHighMemBwCorruptionAvoidance:skl,bxt
+ * Display WA #0883: skl,bxt
+ */
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_DISABLE_DUMMY0);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ /*
+ * WaFbcNukeOnHostModify:skl,kbl,cfl
+ * Display WA #0873: skl,kbl,cfl
+ */
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_NUKE_ON_ANY_MODIFICATION);
+ }
+
+ /* Wa_1409120013:icl,jsl,tgl,dg1 */
+ if (IS_DISPLAY_VER(i915, 11, 12))
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
- intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
- DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+ if (DISPLAY_VER(i915) >= 11 && !IS_DG2(i915))
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
diff --git a/drivers/gpu/drm/i915/display/intel_fbc_regs.h b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
new file mode 100644
index 000000000000..ae0699c3c2fe
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_FBC_REGS__
+#define __INTEL_FBC_REGS__
+
+#include "intel_display_reg_defs.h"
+
+#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
+#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
+#define FBC_CONTROL _MMIO(0x3208)
+#define FBC_CTL_EN REG_BIT(31)
+#define FBC_CTL_PERIODIC REG_BIT(30)
+#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
+#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
+#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
+#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
+#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */
+#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
+#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
+#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
+#define FBC_COMMAND _MMIO(0x320c)
+#define FBC_CMD_COMPRESS REG_BIT(0)
+#define FBC_STATUS _MMIO(0x3210)
+#define FBC_STAT_COMPRESSING REG_BIT(31)
+#define FBC_STAT_COMPRESSED REG_BIT(30)
+#define FBC_STAT_MODIFIED REG_BIT(29)
+#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0)
+#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */
+#define FBC_CTL_FENCE_DBL REG_BIT(4)
+#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2)
+#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0)
+#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1)
+#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2)
+#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3)
+#define FBC_CTL_CPU_FENCE_EN REG_BIT(1)
+#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0)
+#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane))
+#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */
+#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */
+#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1)
+#define FBC_MOD_NUM_VALID REG_BIT(0)
+#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 registers */
+#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */
+#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0)
+#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1)
+#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2)
+#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3)
+
+#define FBC_LL_SIZE (1536)
+
+#define DPFC_CB_BASE _MMIO(0x3200)
+#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240)
+#define DPFC_CONTROL _MMIO(0x3208)
+#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248)
+#define DPFC_CTL_EN REG_BIT(31)
+#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */
+#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */
+#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */
+#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */
+#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */
+#define DPFC_CTL_PLANE_BINDING_MASK REG_GENMASK(12, 11) /* lnl+ */
+#define DPFC_CTL_PLANE_BINDING(plane_id) REG_FIELD_PREP(DPFC_CTL_PLANE_BINDING_MASK, (plane_id))
+#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */
+#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */
+#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */
+#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)
+#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0)
+#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)
+#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2)
+#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence))
+#define DPFC_RECOMP_CTL _MMIO(0x320c)
+#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c)
+#define DPFC_RECOMP_STALL_EN REG_BIT(27)
+#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16)
+#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0)
+#define DPFC_STATUS _MMIO(0x3210)
+#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250)
+#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16)
+#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0)
+#define DPFC_STATUS2 _MMIO(0x3214)
+#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254)
+#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0)
+#define DPFC_FENCE_YOFF _MMIO(0x3218)
+#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258)
+#define DPFC_CHICKEN _MMIO(0x3224)
+#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264)
+#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
+#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
+#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
+#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */
+#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
+
+#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268)
+#define FBC_STRIDE_OVERRIDE REG_BIT(15)
+#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
+#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
+
+#define ILK_FBC_RT_BASE _MMIO(0x2128)
+#define ILK_FBC_RT_VALID REG_BIT(0)
+#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
+
+#define SNB_DPFC_CTL_SA _MMIO(0x100100)
+#define SNB_DPFC_FENCE_EN REG_BIT(29)
+#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0)
+#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence))
+#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
+
+#define IVB_FBC_RT_BASE _MMIO(0x7020)
+#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
+
+#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384)
+#define FBC_REND_NUKE REG_BIT(2)
+#define FBC_REND_CACHE_CLEAN REG_BIT(1)
+
+#endif /* __INTEL_FBC_REGS__ */
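
Every field in the new header is declared through the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers, so mask and value definitions stay together and the shift is derived from the mask itself. A rough userspace approximation of how those helpers compose — the kernel versions in i915_reg_defs.h and linux/bits.h add compile-time validation that this sketch omits:

#include <stdint.h>
#include <stdio.h>

/* Simplified equivalents of GENMASK/FIELD_PREP/FIELD_GET for u32. */
#define GENMASK32(h, l) \
	((~UINT32_C(0) << (l)) & (~UINT32_C(0) >> (31 - (h))))
#define FIELD_SHIFT(mask)	(__builtin_ctz(mask))
#define FIELD_PREP32(mask, val) \
	(((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET32(mask, reg) \
	(((reg) & (mask)) >> FIELD_SHIFT(mask))

#define DPFC_CTL_LIMIT_MASK	GENMASK32(7, 6) /* as in the header above */

int main(void)
{
	uint32_t reg = FIELD_PREP32(DPFC_CTL_LIMIT_MASK, 2); /* the "4X" limit */

	printf("reg = 0x%x\n", reg);                                   /* 0x80 */
	printf("limit = %u\n", FIELD_GET32(DPFC_CTL_LIMIT_MASK, reg)); /* 2 */
	return 0;
}

DPFC_CTL_LIMIT_4X in the header above is exactly REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2), i.e. the 0x80 computed here.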
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 99894a855ef0..bda702c2cab8 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -24,7 +24,6 @@
* David Airlie
*/
-#include <linux/async.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -39,6 +38,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -58,7 +58,6 @@ struct intel_fbdev {
struct intel_framebuffer *fb;
struct i915_vma *vma;
unsigned long vma_flags;
- async_cookie_t cookie;
int preferred_bpp;
/* Whether or not fbdev hpd processing is temporarily suspended */
@@ -135,6 +134,29 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
return i915_gem_fb_mmap(obj, vma);
}
+static void intel_fbdev_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper);
+
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ /*
+ * We rely on the object-free to release the VMA pinning for
+ * the info->screen_base mmapping. Leaking the VMA is simpler than
+ * trying to rectify all the possible error paths leading here.
+ */
+ intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
+ drm_framebuffer_remove(&ifbdev->fb->base);
+
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(&ifbdev->helper);
+ kfree(ifbdev);
+}
+
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for fb ops");
+
static const struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
@@ -144,8 +166,11 @@ static const struct fb_ops intelfb_ops = {
.fb_pan_display = intel_fbdev_pan_display,
__FB_DEFAULT_DEFERRED_OPS_DRAW(intel_fbdev),
.fb_mmap = intel_fbdev_mmap,
+ .fb_destroy = intel_fbdev_fb_destroy,
};
+__diag_pop();
+
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -153,7 +178,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
const struct i915_gtt_view view = {
.type = I915_GTT_VIEW_NORMAL,
};
@@ -245,7 +269,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma_flags = flags;
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- vga_switcheroo_client_fb_set(pdev, info);
+
return 0;
out_unpin:
@@ -271,25 +295,6 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_dirty = intelfb_dirty,
};
-static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
-{
- /* We rely on the object-free to release the VMA pinning for
- * the info->screen_base mmaping. Leaking the VMA is simpler than
- * trying to rectify all the possible error paths leading here.
- */
-
- drm_fb_helper_fini(&ifbdev->helper);
-
- if (ifbdev->vma)
- intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
-
- if (ifbdev->fb)
- drm_framebuffer_remove(&ifbdev->fb->base);
-
- drm_fb_helper_unprepare(&ifbdev->helper);
- kfree(ifbdev);
-}
-
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
* The core display code will have read out the current plane configuration,
@@ -453,93 +458,6 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
true);
}
-int intel_fbdev_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_fbdev *ifbdev;
- int ret;
-
- if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv)))
- return -ENODEV;
-
- ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
- if (ifbdev == NULL)
- return -ENOMEM;
-
- mutex_init(&ifbdev->hpd_lock);
- drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
-
- if (intel_fbdev_init_bios(dev, ifbdev))
- ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
- else
- ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
-
- ret = drm_fb_helper_init(dev, &ifbdev->helper);
- if (ret) {
- kfree(ifbdev);
- return ret;
- }
-
- dev_priv->display.fbdev.fbdev = ifbdev;
- INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
-
- return 0;
-}
-
-static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
-{
- struct intel_fbdev *ifbdev = data;
-
- /* Due to peculiar init order wrt to hpd handling this is separate. */
- if (drm_fb_helper_initial_config(&ifbdev->helper))
- intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-}
-
-void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
-
- if (!ifbdev)
- return;
-
- ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
-}
-
-static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
-{
- if (!ifbdev->cookie)
- return;
-
- /* Only serialises with all preceding async calls, hence +1 */
- async_synchronize_cookie(ifbdev->cookie + 1);
- ifbdev->cookie = 0;
-}
-
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
-
- if (!ifbdev)
- return;
-
- intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true);
-
- if (!current_is_async())
- intel_fbdev_sync(ifbdev);
-
- drm_fb_helper_unregister_info(&ifbdev->helper);
-}
-
-void intel_fbdev_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev);
-
- if (!ifbdev)
- return;
-
- intel_fbdev_destroy(ifbdev);
-}
-
/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
* processing, fbdev will perform a full connector reprobe if a hotplug event
* was received while HPD was suspended.
@@ -622,15 +540,13 @@ set_suspend:
intel_fbdev_hpd_set_suspend(dev_priv, state);
}
-void intel_fbdev_output_poll_changed(struct drm_device *dev)
+static int intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
bool send_hpd;
if (!ifbdev)
- return;
-
- intel_fbdev_sync(ifbdev);
+ return -EINVAL;
mutex_lock(&ifbdev->hpd_lock);
send_hpd = !ifbdev->hpd_suspended;
@@ -639,21 +555,137 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
drm_fb_helper_hotplug_event(&ifbdev->helper);
+
+ return 0;
}
-void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
+static int intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
{
struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
+ int ret;
if (!ifbdev)
- return;
+ return -EINVAL;
- intel_fbdev_sync(ifbdev);
if (!ifbdev->vma)
+ return -ENOMEM;
+
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper);
+ if (ret)
+ return ret;
+
+ intel_fbdev_invalidate(ifbdev);
+
+ return 0;
+}
+
+/*
+ * Fbdev client and struct drm_client_funcs
+ */
+
+static void intel_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = fb_helper->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (fb_helper->info) {
+ vga_switcheroo_client_fb_set(pdev, NULL);
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_fb_helper_unprepare(fb_helper);
+ drm_client_release(&fb_helper->client);
+ kfree(fb_helper);
+ }
+}
+
+static int intel_fbdev_client_restore(struct drm_client_dev *client)
+{
+ struct drm_i915_private *dev_priv = to_i915(client->dev);
+ int ret;
+
+ ret = intel_fbdev_restore_mode(dev_priv);
+ if (ret)
+ return ret;
+
+ vga_switcheroo_process_delayed_switch();
+
+ return 0;
+}
+
+static int intel_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int ret;
+
+ if (dev->fb_helper)
+ return intel_fbdev_output_poll_changed(dev);
+
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
+
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
+
+ vga_switcheroo_client_fb_set(pdev, fb_helper->info);
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "Failed to setup i915 fbdev emulation (ret=%d)\n", ret);
+ return ret;
+}
+
+static const struct drm_client_funcs intel_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = intel_fbdev_client_unregister,
+ .restore = intel_fbdev_client_restore,
+ .hotplug = intel_fbdev_client_hotplug,
+};
+
+void intel_fbdev_setup(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ struct intel_fbdev *ifbdev;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
return;
- if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
- intel_fbdev_invalidate(ifbdev);
+ ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
+ if (!ifbdev)
+ return;
+ drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
+
+ i915->display.fbdev.fbdev = ifbdev;
+ INIT_WORK(&i915->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
+ mutex_init(&ifbdev->hpd_lock);
+ if (intel_fbdev_init_bios(dev, ifbdev))
+ ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
+ else
+ ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
+
+ ret = drm_client_init(dev, &ifbdev->helper.client, "intel-fbdev",
+ &intel_fbdev_client_funcs);
+ if (ret) {
+ drm_err(dev, "Failed to register client: %d\n", ret);
+ goto err_drm_fb_helper_unprepare;
+ }
+
+ drm_client_register(&ifbdev->helper.client);
+
+ return;
+
+err_drm_fb_helper_unprepare:
+ drm_fb_helper_unprepare(&ifbdev->helper);
+ mutex_destroy(&ifbdev->hpd_lock);
+ kfree(ifbdev);
}
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
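
The net effect of this file's changes is that the explicit intel_fbdev_init() / intel_fbdev_initial_config_async() / intel_fbdev_unregister() / intel_fbdev_fini() entry points are replaced by a single intel_fbdev_setup() plus the generic drm_client callbacks. A rough user-space model of that event-driven flow (hypothetical types, not the real DRM API):

#include <stdio.h>

struct client_funcs {
        int  (*hotplug)(void *client);    /* initial config + output changes */
        int  (*restore)(void *client);    /* last DRM master dropped */
        void (*unregister)(void *client); /* device teardown */
};

static int fb_hotplug(void *c)     { puts("create/update console fb"); return 0; }
static int fb_restore(void *c)     { puts("restore fbdev mode"); return 0; }
static void fb_unregister(void *c) { puts("destroy console fb"); }

int main(void)
{
        static const struct client_funcs funcs = {
                fb_hotplug, fb_restore, fb_unregister,
        };

        /* The DRM core drives the client; the driver only registers it. */
        funcs.hotplug(NULL);
        funcs.restore(NULL);
        funcs.unregister(NULL);
        return 0;
}
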
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 04fd523a5023..08de2d5b3433 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -14,29 +14,11 @@ struct intel_fbdev;
struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
-int intel_fbdev_init(struct drm_device *dev);
-void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv);
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
-void intel_fbdev_fini(struct drm_i915_private *dev_priv);
+void intel_fbdev_setup(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
-void intel_fbdev_output_poll_changed(struct drm_device *dev);
-void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
#else
-static inline int intel_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
{
}
@@ -44,13 +26,6 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo
{
}
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915)
-{
-}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/display/intel_fixed.h
index a327094de2bd..a327094de2bd 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/display/intel_fixed.h
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index d3e03ed5b79c..9c8e1e91ff1c 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -411,7 +411,7 @@ gmbus_wait_idle(struct drm_i915_private *i915)
add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
- ret = intel_de_wait_for_register_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
+ ret = intel_de_wait_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
intel_de_write_fw(i915, GMBUS4(i915), 0);
remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 9edac27bab26..d5ed4c7dfbc0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -369,9 +369,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915)
}
/* Wait for the keys to load (500us) */
- ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS,
- HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
- 10, 1, &val);
+ ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
+ 10, 1, &val);
if (ret)
return ret;
else if (!(val & HDCP_KEY_LOAD_STATUS))
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 302bff75b06c..35823e1f65d6 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -13,6 +13,12 @@
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
+struct intel_hdcp_gsc_message {
+ struct i915_vma *vma;
+ void *hdcp_cmd_in;
+ void *hdcp_cmd_out;
+};
+
bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
{
return DISPLAY_VER(i915) >= 14;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
index eba2057c5a9e..5f610df61cc9 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -10,12 +10,7 @@
#include <linux/types.h>
struct drm_i915_private;
-
-struct intel_hdcp_gsc_message {
- struct i915_vma *vma;
- void *hdcp_cmd_in;
- void *hdcp_cmd_out;
-};
+struct intel_hdcp_gsc_message;
bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
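
These two hunks apply the usual opaque-type idiom: the header now keeps only a forward declaration, so struct intel_hdcp_gsc_message's layout is private to intel_hdcp_gsc.c. A generic user-space sketch of the idiom with hypothetical names:

#include <stdlib.h>
#include <stdio.h>

/* header side: layout hidden from consumers */
struct gsc_message;
struct gsc_message *gsc_message_alloc(void);
void gsc_message_free(struct gsc_message *msg);

/* implementation side: the only place that sees the members */
struct gsc_message {
        void *cmd_in;
        void *cmd_out;
};

struct gsc_message *gsc_message_alloc(void)
{
        return calloc(1, sizeof(struct gsc_message));
}

void gsc_message_free(struct gsc_message *msg)
{
        free(msg);
}

int main(void)
{
        struct gsc_message *msg = gsc_message_alloc();

        printf("allocated opaque message at %p\n", (void *)msg);
        gsc_message_free(msg);
        return 0;
}
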
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 90d2236fede3..5f6deceaf8ba 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -114,6 +114,8 @@ static u32 g4x_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_GAMUT;
case DP_SDP_VSC:
return 0;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return 0;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -137,6 +139,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_GMP_HSW;
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return VIDEO_DIP_ENABLE_AS_ADL;
case DP_SDP_PPS:
return VDIP_ENABLE_PPS;
case HDMI_INFOFRAME_TYPE_AVI:
@@ -164,6 +168,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+ case DP_SDP_ADAPTIVE_SYNC:
+ return ADL_TVIDEO_DIP_AS_SDP_DATA(cpu_transcoder, i);
case DP_SDP_PPS:
return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
@@ -186,6 +192,8 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_VSC_DATA_SIZE;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return VIDEO_DIP_ASYNC_DATA_SIZE;
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
@@ -563,6 +571,9 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) >= 10)
mask |= VIDEO_DIP_ENABLE_DRM_GLK;
+ if (HAS_AS_SDP(dev_priv))
+ mask |= VIDEO_DIP_ENABLE_AS_ADL;
+
return val & mask;
}
@@ -570,6 +581,7 @@ static const u8 infoframe_type_to_idx[] = {
HDMI_PACKET_TYPE_GENERAL_CONTROL,
HDMI_PACKET_TYPE_GAMUT_METADATA,
DP_SDP_VSC,
+ DP_SDP_ADAPTIVE_SYNC,
HDMI_INFOFRAME_TYPE_AVI,
HDMI_INFOFRAME_TYPE_SPD,
HDMI_INFOFRAME_TYPE_VENDOR,
@@ -1212,7 +1224,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
- VIDEO_DIP_ENABLE_DRM_GLK);
+ VIDEO_DIP_ENABLE_DRM_GLK | VIDEO_DIP_ENABLE_AS_ADL);
if (!enable) {
intel_de_write(dev_priv, reg, val);
@@ -1832,7 +1844,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
bool has_hdmi_sink)
{
struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
- enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port);
+ struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
return MODE_CLOCK_LOW;
@@ -1854,11 +1866,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_RANGE;
/* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
- if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 533200)
+ if (intel_encoder_is_combo(encoder) && clock > 500000 && clock < 533200)
return MODE_CLOCK_RANGE;
/* ICL+ TC PHY PLL can't generate 500-532.8 MHz */
- if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800)
+ if (intel_encoder_is_tc(encoder) && clock > 500000 && clock < 532800)
return MODE_CLOCK_RANGE;
/*
@@ -1981,7 +1993,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
enum drm_mode_status status;
int clock = mode->clock;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
bool ycbcr_420_only;
enum intel_output_format sink_format;
@@ -2664,8 +2676,9 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
drm_scdc_set_scrambling(connector, scrambling);
}
-static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 chv_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2686,8 +2699,9 @@ static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
-static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 bxt_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2705,9 +2719,9 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
-static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 cnp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2731,22 +2745,23 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
-static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
- if (intel_phy_is_combo(dev_priv, phy))
+ if (intel_encoder_is_combo(encoder))
return GMBUS_PIN_1_BXT + port;
- else if (intel_phy_is_tc(dev_priv, phy))
- return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
+ else if (intel_encoder_is_tc(encoder))
+ return GMBUS_PIN_9_TC1_ICP + intel_encoder_to_tc(encoder);
drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
-static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 mcc_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u8 ddc_pin;
switch (phy) {
@@ -2767,11 +2782,12 @@ static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
-static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
- WARN_ON(port == PORT_C);
+ WARN_ON(encoder->port == PORT_C);
/*
* Pin mapping for RKL depends on which PCH is present. With TGP, the
@@ -2785,11 +2801,12 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return GMBUS_PIN_1_BXT + phy;
}
-static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
+static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
- drm_WARN_ON(&i915->drm, port == PORT_A);
+ drm_WARN_ON(&i915->drm, encoder->port == PORT_A);
/*
* Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
@@ -2803,16 +2820,16 @@ static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port po
return GMBUS_PIN_1_BXT + phy;
}
-static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 dg1_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- return intel_port_to_phy(dev_priv, port) + 1;
+ return intel_encoder_to_phy(encoder) + 1;
}
-static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 adls_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum phy phy = intel_encoder_to_phy(encoder);
- WARN_ON(port == PORT_B || port == PORT_C);
+ WARN_ON(encoder->port == PORT_B || encoder->port == PORT_C);
/*
* Pin mapping for ADL-S requires TC pins for all combo phy outputs
@@ -2824,9 +2841,9 @@ static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port
return GMBUS_PIN_9_TC1_ICP + phy - PHY_B;
}
-static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2850,30 +2867,29 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
u8 ddc_pin;
if (IS_ALDERLAKE_S(dev_priv))
- ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = adls_encoder_to_ddc_pin(encoder);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
- ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = dg1_encoder_to_ddc_pin(encoder);
else if (IS_ROCKETLAKE(dev_priv))
- ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = rkl_encoder_to_ddc_pin(encoder);
else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
- ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
HAS_PCH_TGP(dev_priv))
- ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = mcc_encoder_to_ddc_pin(encoder);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = icl_encoder_to_ddc_pin(encoder);
else if (HAS_PCH_CNP(dev_priv))
- ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = cnp_encoder_to_ddc_pin(encoder);
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = bxt_encoder_to_ddc_pin(encoder);
else if (IS_CHERRYVIEW(dev_priv))
- ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = chv_encoder_to_ddc_pin(encoder);
else
- ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = g4x_encoder_to_ddc_pin(encoder);
return ddc_pin;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 76076509f771..d270bb7b9462 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1444,7 +1444,7 @@ void intel_hpd_enable_detection(struct intel_encoder *encoder)
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
+ if (i915->display.irq.display_irqs_enabled && i915->display.funcs.hotplug)
i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 5863763de530..93e6cac9a4ed 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -72,7 +72,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_reg.h"
+#include "intel_audio_regs.h"
#include "intel_de.h"
#include "intel_lpe_audio.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 221f5c6c871b..8b8959073466 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -392,16 +392,13 @@ intel_lvds_mode_valid(struct drm_connector *_connector,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
- int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
+ int max_pixclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index fcbb083318a7..68bd5101ec89 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -27,7 +27,6 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
-#include <linux/firmware.h>
#include <acpi/video.h>
#include <drm/drm_edid.h>
@@ -263,7 +262,6 @@ struct intel_opregion {
struct opregion_asle *asle;
struct opregion_asle_ext *asle_ext;
void *rvda;
- void *vbt_firmware;
const void *vbt;
u32 vbt_size;
struct work_struct asle_work;
@@ -869,46 +867,6 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
-static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
-{
- struct intel_opregion *opregion = dev_priv->display.opregion;
- const struct firmware *fw = NULL;
- const char *name = dev_priv->display.params.vbt_firmware;
- int ret;
-
- if (!name || !*name)
- return -ENOENT;
-
- ret = request_firmware(&fw, name, dev_priv->drm.dev);
- if (ret) {
- drm_err(&dev_priv->drm,
- "Requesting VBT firmware \"%s\" failed (%d)\n",
- name, ret);
- return ret;
- }
-
- if (intel_bios_is_valid_vbt(dev_priv, fw->data, fw->size)) {
- opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (opregion->vbt_firmware) {
- drm_dbg_kms(&dev_priv->drm,
- "Found valid VBT firmware \"%s\"\n", name);
- opregion->vbt = opregion->vbt_firmware;
- opregion->vbt_size = fw->size;
- ret = 0;
- } else {
- ret = -ENOMEM;
- }
- } else {
- drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n",
- name);
- ret = -EINVAL;
- }
-
- release_firmware(fw);
-
- return ret;
-}
-
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion;
@@ -1006,9 +964,6 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n");
}
- if (intel_load_vbt_firmware(dev_priv) == 0)
- goto out;
-
if (dmi_check_system(intel_no_opregion_vbt))
goto out;
@@ -1176,6 +1131,16 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
return drm_edid;
}
+bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = i915->display.opregion;
+
+ if (!opregion || !opregion->vbt)
+ return false;
+
+ return true;
+}
+
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
{
struct intel_opregion *opregion = i915->display.opregion;
@@ -1186,7 +1151,7 @@ const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
if (size)
*size = opregion->vbt_size;
- return opregion->vbt;
+ return kmemdup(opregion->vbt, opregion->vbt_size, GFP_KERNEL);
}
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
@@ -1312,7 +1277,6 @@ void intel_opregion_cleanup(struct drm_i915_private *i915)
memunmap(opregion->header);
if (opregion->rvda)
memunmap(opregion->rvda);
- kfree(opregion->vbt_firmware);
kfree(opregion);
i915->display.opregion = NULL;
}
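
Note the behavioural change in intel_opregion_get_vbt(): it now returns a kmemdup() copy instead of a pointer into the opregion mapping, so callers own the returned buffer. A standalone user-space sketch of that ownership contract (stand-in function, illustrative data):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Stand-in for the opregion-backed VBT lookup; returns a heap copy the
 * caller owns, mirroring the kmemdup() behaviour introduced above. */
static const void *get_vbt_copy(const void *vbt, size_t vbt_size, size_t *size)
{
        void *copy = malloc(vbt_size);

        if (!copy)
                return NULL;
        memcpy(copy, vbt, vbt_size);
        if (size)
                *size = vbt_size;
        return copy;
}

int main(void)
{
        static const unsigned char fake_vbt[] = { '$', 'V', 'B', 'T' };
        size_t size;
        const void *vbt = get_vbt_copy(fake_vbt, sizeof(fake_vbt), &size);

        if (vbt) {
                printf("got %zu-byte private copy\n", size);
                free((void *)vbt);      /* caller releases the duplicate */
        }
        return 0;
}
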
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 0bec224f711f..4b2b8e752632 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -53,6 +53,7 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
+bool intel_opregion_vbt_present(struct drm_i915_private *i915);
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
@@ -119,6 +120,11 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
+static inline bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+{
+ return false;
+}
+
static inline const void *
intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
{
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 2b1392d5a902..1c2099ed5514 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -972,10 +972,11 @@ static int check_overlay_dst(struct intel_overlay *overlay,
rec->dst_width, rec->dst_height);
clipped = req;
- drm_rect_intersect(&clipped, &crtc_state->pipe_src);
- if (!drm_rect_visible(&clipped) ||
- !drm_rect_equals(&clipped, &req))
+ if (!drm_rect_intersect(&clipped, &crtc_state->pipe_src))
+ return -EINVAL;
+
+ if (!drm_rect_equals(&clipped, &req))
return -EINVAL;
return 0;
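
The rewritten check leans on drm_rect_intersect() returning whether the clipped rectangle is still visible. A small user-space model of that contract, showing why a partially clipped request is rejected:

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static bool rect_intersect(struct rect *r, const struct rect *clip)
{
        if (r->x1 < clip->x1) r->x1 = clip->x1;
        if (r->y1 < clip->y1) r->y1 = clip->y1;
        if (r->x2 > clip->x2) r->x2 = clip->x2;
        if (r->y2 > clip->y2) r->y2 = clip->y2;
        return r->x1 < r->x2 && r->y1 < r->y2; /* still visible? */
}

int main(void)
{
        struct rect req = { 100, 100, 200, 200 };
        struct rect src = { 0, 0, 150, 150 };

        /* Clipped to 100..150 x 100..150: visible, but no longer equal to
         * the request, so the driver check above returns -EINVAL. */
        printf("visible=%d\n", rect_intersect(&req, &src));
        return 0;
}
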
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 073ea3166c36..6f4ff6a89c32 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -47,10 +47,12 @@
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
- if (i915->display.params.panel_use_ssc >= 0)
- return i915->display.params.panel_use_ssc != 0;
- return i915->display.vbt.lvds_use_ssc &&
- !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
+ struct intel_display *display = &i915->display;
+
+ if (display->params.panel_use_ssc >= 0)
+ return display->params.panel_use_ssc != 0;
+ return display->vbt.lvds_use_ssc &&
+ !intel_has_quirk(display, QUIRK_LVDS_SSC_DISABLE);
}
const struct drm_display_mode *
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index baf679759e00..826e38a9e6a4 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -474,7 +474,7 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* read out port_clock from the DPLL */
- i9xx_crtc_clock_get(crtc, crtc_state);
+ i9xx_crtc_clock_get(crtc_state);
/*
* In case there is an active pipe without active ports,
@@ -529,7 +529,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
&crtc_state->dpll_hw_state);
drm_WARN_ON(&dev_priv->drm, !pll_active);
- tmp = crtc_state->dpll_hw_state.dpll;
+ tmp = crtc_state->dpll_hw_state.i9xx.dpll;
crtc_state->pixel_multiplier =
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 744e332fa2af..9ca981b7a12c 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -119,10 +119,11 @@ intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
if (!encoder)
return;
- phy = intel_port_to_phy(i915, encoder->port);
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(encoder))
return;
+ phy = intel_encoder_to_phy(encoder);
+
if (set_bit)
pmdemand_state->active_combo_phys_mask |= BIT(phy);
else
@@ -222,14 +223,7 @@ static bool
intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
struct intel_encoder *encoder)
{
- enum phy phy;
-
- if (!encoder)
- return false;
-
- phy = intel_port_to_phy(i915, encoder->port);
-
- return intel_phy_is_tc(i915, phy);
+ return encoder && intel_encoder_is_tc(encoder);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h
index 2941a1a18b72..128fd61f8f14 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.h
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h
@@ -43,9 +43,8 @@ struct intel_pmdemand_state {
struct pmdemand_params params;
};
-#define to_intel_pmdemand_state(x) container_of((x), \
- struct intel_pmdemand_state, \
- base)
+#define to_intel_pmdemand_state(global_state) \
+ container_of_const((global_state), struct intel_pmdemand_state, base)
void intel_pmdemand_init_early(struct drm_i915_private *i915);
int intel_pmdemand_init(struct drm_i915_private *i915);
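
container_of_const(), used by the new macro, returns a const pointer when given one and a mutable pointer otherwise. A simplified user-space rendition of the _Generic dispatch behind it, hard-coded to one base type for brevity (the kernel's version is more general):

#include <stddef.h>
#include <stdio.h>

struct base { int id; };
struct derived { struct base base; int extra; };

#define container_of_const(ptr, type, member)                              \
        _Generic((ptr),                                                    \
        const struct base *:                                               \
                (const type *)((const char *)(ptr) - offsetof(type, member)), \
        struct base *:                                                     \
                (type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct derived d = { { 1 }, 42 };
        const struct base *cb = &d.base;
        const struct derived *cd = container_of_const(cb, struct derived, base);

        printf("extra=%d\n", cd->extra); /* const in, const out */
        return 0;
}
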
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 2d65a538f83e..0ccbf9a85914 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -605,8 +605,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
- if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
- mask, value, 5000))
+ if (intel_de_wait(dev_priv, pp_stat_reg, mask, value, 5000))
drm_err(&dev_priv->drm,
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
@@ -1351,7 +1350,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp,
static void pps_init_delays_vbt(struct intel_dp *intel_dp,
struct edp_power_seq *vbt)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
*vbt = connector->panel.vbt.edp.pps;
@@ -1364,9 +1363,9 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
- if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
+ if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
vbt->t11_t12);
}
@@ -1671,6 +1670,37 @@ void intel_pps_setup(struct drm_i915_private *i915)
i915->display.pps.mmio_base = PPS_BASE;
}
+static int intel_pps_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ if (connector->base.status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Panel power up delay: %d\n",
+ intel_dp->pps.panel_power_up_delay);
+ seq_printf(m, "Panel power down delay: %d\n",
+ intel_dp->pps.panel_power_down_delay);
+ seq_printf(m, "Backlight on delay: %d\n",
+ intel_dp->pps.backlight_on_delay);
+ seq_printf(m, "Backlight off delay: %d\n",
+ intel_dp->pps.backlight_off_delay);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(intel_pps);
+
+void intel_pps_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct dentry *root = connector->base.debugfs_entry;
+ int connector_type = connector->base.connector_type;
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file("i915_panel_timings", 0444, root,
+ connector, &intel_pps_fops);
+}
+
void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
i915_reg_t pp_reg;
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index a2c2467e3c22..07ef96ca8da2 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -51,6 +51,8 @@ void vlv_pps_init(struct intel_encoder *encoder,
void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
void intel_pps_setup(struct drm_i915_private *i915);
+void intel_pps_connector_debugfs_add(struct intel_connector *connector);
+
void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
#endif /* __INTEL_PPS_H__ */
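
For context on the debugfs addition in intel_pps.c: DEFINE_SHOW_ATTRIBUTE(intel_pps) is what produces the intel_pps_fops referenced there. From memory it expands roughly to the following, pairing the _show callback with single_open(); see include/linux/seq_file.h for the authoritative definition:

static int intel_pps_open(struct inode *inode, struct file *file)
{
        return single_open(file, intel_pps_show, inode->i_private);
}

static const struct file_operations intel_pps_fops = {
        .owner   = THIS_MODULE,
        .open    = intel_pps_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};
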
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6927785fd6ff..f5b33335a9ae 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -171,14 +171,27 @@
*
* The rest of the bits are more self-explanatory and/or
* irrelevant for normal operation.
+ *
+ * Description of the intel_crtc_state variables has_psr, has_panel_replay and
+ * has_sel_update:
+ *
+ * has_psr (alone): PSR1
+ * has_psr + has_sel_update: PSR2
+ * has_psr + has_panel_replay: Panel Replay
+ * has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
+ *
+ * Description of the intel_psr variables enabled, panel_replay_enabled and
+ * sel_update_enabled:
+ *
+ * enabled (alone): PSR1
+ * enabled + sel_update_enabled: PSR2
+ * enabled + panel_replay_enabled: Panel Replay
+ * enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
*/
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
(intel_dp)->psr.source_support)
-#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
- (intel_dp)->psr.source_panel_replay_support)
-
bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
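
A compact model of the flag-to-mode table in the comment above, for quick reference (plain C, not driver code):

#include <stdbool.h>
#include <stdio.h>

static const char *psr_mode(bool has_psr, bool has_panel_replay,
                            bool has_sel_update)
{
        if (!has_psr)
                return "off";
        if (has_panel_replay)
                return has_sel_update ? "Panel Replay Selective Update"
                                      : "Panel Replay";
        return has_sel_update ? "PSR2" : "PSR1";
}

int main(void)
{
        printf("%s\n", psr_mode(true, false, true));  /* PSR2 */
        printf("%s\n", psr_mode(true, true, false));  /* Panel Replay */
        return 0;
}
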
@@ -330,6 +343,9 @@ static void psr_irq_control(struct intel_dp *intel_dp)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
+ if (intel_dp->psr.panel_replay_enabled)
+ return;
+
mask = psr_irq_psr_error_bit_get(intel_dp);
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
@@ -619,40 +635,59 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
return false;
}
-static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+static unsigned int intel_psr_get_enable_sink_offset(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr.panel_replay_enabled ?
+ PANEL_REPLAY_CONFIG : DP_PSR_EN_CFG;
+}
+
+/*
+ * Note: Most of the bits are same in PANEL_REPLAY_CONFIG and DP_PSR_EN_CFG. We
+ * are relying on PSR definitions on these "common" bits.
+ */
+void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 dpcd_val = DP_PSR_ENABLE;
- if (intel_dp->psr.panel_replay_enabled)
- return;
-
- if (intel_dp->psr.psr2_enabled) {
+ if (crtc_state->has_psr2) {
/* Enable ALPM at sink for psr2 */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE |
- DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+ if (!crtc_state->has_panel_replay) {
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE |
+ DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+
+ if (psr2_su_region_et_valid(intel_dp))
+ dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
+ }
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
- if (psr2_su_region_et_valid(intel_dp))
- dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
} else {
if (intel_dp->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (!crtc_state->has_panel_replay && DISPLAY_VER(dev_priv) >= 8)
dpcd_val |= DP_PSR_CRC_VERIFICATION;
}
- if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+ if (crtc_state->has_panel_replay)
+ dpcd_val |= DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
+ DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN;
+
+ if (crtc_state->req_psr2_sdp_prior_scanline)
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
if (intel_dp->psr.entry_setup_frames > 0)
dpcd_val |= DP_PSR_FRAME_CAPTURE;
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ intel_psr_get_enable_sink_offset(intel_dp),
+ dpcd_val);
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ if (intel_dp_is_edp(intel_dp))
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
@@ -1126,6 +1161,141 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
+/*
+ * See Bspec: 71632 for the table
+ *
+ * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
+ *
+ * Half cycle duration:
+ *
+ * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
+ * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
+ *
+ * Link rates 5.4 - 8.1
+ * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
+ * LFPS Period chosen is the mid-point of the min:max values from the table
+ * FLOOR( LFPS Period in Symbol clocks /
+ * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
+ */
+static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
+ int *silence_period,
+ int *lfps_half_cycle)
+{
+ switch (link_rate) {
+ case 162000:
+ *silence_period = 20;
+ *lfps_half_cycle = 5;
+ break;
+ case 216000:
+ *silence_period = 27;
+ *lfps_half_cycle = 7;
+ break;
+ case 243000:
+ *silence_period = 31;
+ *lfps_half_cycle = 8;
+ break;
+ case 270000:
+ *silence_period = 34;
+ *lfps_half_cycle = 9;
+ break;
+ case 324000:
+ *silence_period = 41;
+ *lfps_half_cycle = 11;
+ break;
+ case 432000:
+ *silence_period = 56;
+ *lfps_half_cycle = 15;
+ break;
+ case 540000:
+ *silence_period = 69;
+ *lfps_half_cycle = 12;
+ break;
+ case 648000:
+ *silence_period = 84;
+ *lfps_half_cycle = 15;
+ break;
+ case 675000:
+ *silence_period = 87;
+ *lfps_half_cycle = 15;
+ break;
+ case 810000:
+ *silence_period = 104;
+ *lfps_half_cycle = 19;
+ break;
+ default:
+ *silence_period = *lfps_half_cycle = -1;
+ return false;
+ }
+ return true;
+}
+
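
To make the low-rate branch of the formula concrete, here is a standalone check that reproduces the half-cycle column of the table for link rates up to 4.32 GHz (link_rate in 10 kbit/s units as used by the driver; tLFPS_Cycle = 70 ns):

#include <stdio.h>

int main(void)
{
        static const int rates[] = { 162000, 216000, 243000,
                                     270000, 324000, 432000 };

        for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
                /* FLOOR((Link Rate * tLFPS_Cycle) / (2 * 10)); rate * 10 kHz */
                long long hz = (long long)rates[i] * 10000;
                long long half_cycle = hz * 70 / 1000000000 / 20;

                printf("%d -> %lld\n", rates[i], half_cycle);
        }
        return 0; /* prints 5, 7, 8, 9, 11, 15, matching the switch above */
}
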
+/*
+ * AUX-Less Wake Time = CEILING( ((PHY P2 to P0) + tLFPS_Period,Max +
+ * tSilence,Max + tPHY Establishment + tCDS) / tline)
+ * For the "PHY P2 to P0" latency see the PHY Power Control page
+ * (PHY P2 to P0) : https://gfxspecs.intel.com/Predator/Home/Index/68965
+ * : 12 us
+ * The tLFPS_Period, Max term is 800ns
+ * The tSilence, Max term is 180ns
+ * The tPHY Establishment (a.k.a. t1) term is 50us
+ * The tCDS term is 1 or 2 times t2
+ * t2 = Number ML_PHY_LOCK * tML_PHY_LOCK
+ * Number ML_PHY_LOCK = ( 7 + CEILING( 6.5us / tML_PHY_LOCK ) + 1)
+ * Rounding up the 6.5us padding to the next ML_PHY_LOCK boundary and
+ * adding the "+ 1" term ensures all ML_PHY_LOCK sequences that start
+ * within the CDS period complete within the CDS period regardless of
+ * entry into the period
+ * tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
+ * TPS4 Length = 252 Symbols
+ */
+static int _lnl_compute_aux_less_wake_time(int port_clock)
+{
+ int tphy2_p2_to_p0 = 12 * 1000;
+ int tlfps_period_max = 800;
+ int tsilence_max = 180;
+ int t1 = 50 * 1000;
+ int tps4 = 252;
+ int tml_phy_lock = 1000 * 1000 * tps4 * 10 / port_clock;
+ int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
+ int t2 = num_ml_phy_lock * tml_phy_lock;
+ int tcds = 1 * t2;
+
+ return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
+ t1 + tcds, 1000);
+}
+
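
And a worked instance of the wake-time arithmetic above, replicated in user space (a 64-bit intermediate keeps the tML_PHY_LOCK product from overflowing; port_clock in 10 kbit/s units, intermediate times in ns):

#include <stdio.h>

static int aux_less_wake_time_us(int port_clock)
{
        int tphy2_p2_to_p0 = 12 * 1000;         /* PHY P2 to P0, 12 us */
        int tlfps_period_max = 800;
        int tsilence_max = 180;
        int t1 = 50 * 1000;                     /* tPHY Establishment */
        int tps4 = 252;                         /* TPS4 length in symbols */
        int tml_phy_lock = (int)(1000LL * 1000 * tps4 * 10 / port_clock);
        int num_ml_phy_lock = 7 + (6500 + tml_phy_lock - 1) / tml_phy_lock + 1;
        int tcds = 1 * num_ml_phy_lock * tml_phy_lock;
        int total = tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
                    t1 + tcds;

        return (total + 999) / 1000;            /* CEILING(ns -> us) */
}

int main(void)
{
        /* HBR2 (5.4 Gbit/s): tML_PHY_LOCK = 4666 ns, 10 lock cycles,
         * tCDS = 46660 ns, total = 109640 ns -> 110 us. */
        printf("%d us\n", aux_less_wake_time_us(540000));
        return 0;
}
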
+static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int aux_less_wake_time, aux_less_wake_lines, silence_period,
+ lfps_half_cycle;
+
+ aux_less_wake_time =
+ _lnl_compute_aux_less_wake_time(crtc_state->port_clock);
+ aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
+ aux_less_wake_time);
+
+ if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
+ &silence_period,
+ &lfps_half_cycle))
+ return false;
+
+ if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
+ silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
+ lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK)
+ return false;
+
+ if (i915->display.params.psr_safest_params)
+ aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
+
+ intel_dp->psr.alpm_parameters.fast_wake_lines = aux_less_wake_lines;
+ intel_dp->psr.alpm_parameters.silence_period_sym_clocks = silence_period;
+ intel_dp->psr.alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
+
+ return true;
+}
+
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -1142,6 +1312,9 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
if (check_entry_lines > 15)
return false;
+ if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state))
+ return false;
+
if (i915->display.params.psr_safest_params)
check_entry_lines = 15;
@@ -1150,28 +1323,52 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
return true;
}
+/*
+ * IO wake time for DISPLAY_VER < 12 is not directly mentioned in Bspec. There
+ * are 50 us io wake time and 32 us fast wake time. Clearly precharge pulses are
+ * not (improperly) included in the 32 us fast wake time. 50 us - 32 us = 18 us.
+ */
+static int skl_io_buffer_wake_time(void)
+{
+ return 18;
+}
+
+static int tgl_io_buffer_wake_time(void)
+{
+ return 10;
+}
+
+static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (DISPLAY_VER(i915) >= 12)
+ return tgl_io_buffer_wake_time();
+ else
+ return skl_io_buffer_wake_time();
+}
+
static bool _compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+ int tfw_exit_latency = 20; /* eDP spec */
+ int phy_wake = 4; /* eDP spec */
+ int preamble = 8; /* eDP spec */
+ int precharge = intel_dp_aux_fw_sync_len() - preamble;
u8 max_wake_lines;
- if (DISPLAY_VER(i915) >= 12) {
- io_wake_time = 42;
- /*
- * According to Bspec it's 42us, but based on testing
- * it is not enough -> use 45 us.
- */
- fast_wake_time = 45;
+ io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +
+ preamble + phy_wake + tfw_exit_latency;
+ fast_wake_time = precharge + preamble + phy_wake +
+ tfw_exit_latency;
+ if (DISPLAY_VER(i915) >= 12)
/* TODO: Check how we can use ALPM_CTL fast wake extended field */
max_wake_lines = 12;
- } else {
- io_wake_time = 50;
- fast_wake_time = 32;
+ else
max_wake_lines = 8;
- }
io_wake_lines = intel_usecs_to_scanlines(
&crtc_state->hw.adjusted_mode, io_wake_time);
@@ -1422,12 +1619,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /*
+ * FIXME figure out what is wrong with PSR+bigjoiner and
+ * fix it. Presumably something related to the fact that
+ * PSR is a transcoder level feature.
+ */
+ if (crtc_state->bigjoiner_pipes) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR disabled due to bigjoiner\n");
+ return;
+ }
+
if (CAN_PANEL_REPLAY(intel_dp))
crtc_state->has_panel_replay = true;
- else
- crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
- if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
+ crtc_state->has_psr = crtc_state->has_panel_replay ? true :
+ _psr_compute_config(intel_dp, crtc_state);
+
+ if (!crtc_state->has_psr)
return;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
@@ -1454,7 +1663,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
goto unlock;
if (intel_dp->psr.panel_replay_enabled) {
- pipe_config->has_panel_replay = true;
+ pipe_config->has_psr = pipe_config->has_panel_replay = true;
} else {
/*
* Not possible to read EDP_PSR/PSR2_CTL registers as it is
@@ -1559,14 +1768,44 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
+ u32 alpm_ctl;
- if (DISPLAY_VER(dev_priv) < 20)
+ if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.psr2_enabled &&
+ !intel_dp_is_edp(intel_dp)))
return;
- intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
- ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
- ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
- ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
+ /*
+ * Panel Replay on eDP is always using ALPM aux less. I.e. no need to
+ * check panel support at this point.
+ */
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
+ alpm_ctl = ALPM_CTL_ALPM_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS;
+
+ intel_de_write(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
+ PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
+ PORT_ALPM_CTL_SILENCE_PERIOD(
+ psr->alpm_parameters.silence_period_sym_clocks));
+
+ intel_de_write(dev_priv, PORT_ALPM_LFPS_CTL(cpu_transcoder),
+ PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
+ PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
+ psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
+ psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
+ psr->alpm_parameters.lfps_half_cycle_num_of_syms));
+ } else {
+ alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
+ ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines);
+ }
+
+ alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines);
+
+ intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), alpm_ctl);
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -1574,7 +1813,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- u32 mask;
+ u32 mask = 0;
/*
* Only HSW and BDW have PSR AUX registers that need to be setup.
@@ -1588,34 +1827,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* mask LPSP to avoid dependency on other drivers that might block
* runtime_pm besides preventing other hw tracking issues now we
* can rely on frontbuffer tracking.
+ *
+ * From bspec prior LunarLake:
+ * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
+ * panel replay mode.
+ *
+ * From bspec beyond LunarLake:
+ * Panel Replay on DP: No bits are applicable
+ * Panel Replay on eDP: All bits are applicable
*/
- mask = EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD;
+ if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
+ mask = EDP_PSR_DEBUG_MASK_HPD;
- /*
- * For some unknown reason on HSW non-ULT (or at least on
- * Dell Latitude E6540) external displays start to flicker
- * when PSR is enabled on the eDP. SR/PC6 residency is much
- * higher than should be possible with an external display.
- * As a workaround leave LPSP unmasked to prevent PSR entry
- * when external displays are active.
- */
- if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
- mask |= EDP_PSR_DEBUG_MASK_LPSP;
+ if (intel_dp_is_edp(intel_dp)) {
+ mask |= EDP_PSR_DEBUG_MASK_MEMUP;
- if (DISPLAY_VER(dev_priv) < 20)
- mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+ /*
+ * For some unknown reason on HSW non-ULT (or at least on
+ * Dell Latitude E6540) external displays start to flicker
+ * when PSR is enabled on the eDP. SR/PC6 residency is much
+ * higher than should be possible with an external display.
+ * As a workaround leave LPSP unmasked to prevent PSR entry
+ * when external displays are active.
+ */
+ if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+ mask |= EDP_PSR_DEBUG_MASK_LPSP;
- /*
- * No separate pipe reg write mask on hsw/bdw, so have to unmask all
- * registers in order to keep the CURSURFLIVE tricks working :(
- */
- if (IS_DISPLAY_VER(dev_priv, 9, 10))
- mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+ if (DISPLAY_VER(dev_priv) < 20)
+ mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
- /* allow PSR with sprite enabled */
- if (IS_HASWELL(dev_priv))
- mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
+ /*
+ * No separate pipe reg write mask on hsw/bdw, so have to unmask all
+ * registers in order to keep the CURSURFLIVE tricks working :(
+ */
+ if (IS_DISPLAY_VER(dev_priv, 9, 10))
+ mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+ /* allow PSR with sprite enabled */
+ if (IS_HASWELL(dev_priv))
+ mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
+ }
intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
@@ -1634,7 +1885,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
- lnl_alpm_configure(intel_dp);
+ if (intel_dp_is_edp(intel_dp))
+ lnl_alpm_configure(intel_dp);
/*
* Wa_16013835468
@@ -1675,6 +1927,9 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
+ if (intel_dp->psr.panel_replay_enabled)
+ goto no_err;
+
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
* will still keep the error set even after the reset done in the
@@ -1692,6 +1947,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
return false;
}
+no_err:
return true;
}
@@ -1700,7 +1956,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
u32 val;
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
@@ -1722,14 +1977,22 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
if (!psr_interrupt_error_check(intel_dp))
return;
- if (intel_dp->psr.panel_replay_enabled)
+ if (intel_dp->psr.panel_replay_enabled) {
drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
- else
+ } else {
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
- intel_psr_enable_sink(intel_dp);
+ /*
+ * Panel replay has to be enabled before link training: doing it
+ * only for PSR here.
+ */
+ intel_psr_enable_sink(intel_dp, crtc_state);
+ }
+
+ if (intel_dp_is_edp(intel_dp))
+ intel_snps_phy_update_psr_power_state(&dig_port->base, true);
+
intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
intel_dp->psr.paused = false;
@@ -1799,8 +2062,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- enum phy phy = intel_port_to_phy(dev_priv,
- dp_to_dig_port(intel_dp)->base.port);
lockdep_assert_held(&intel_dp->psr.lock);
@@ -1835,12 +2096,25 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
- intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
+ if (intel_dp_is_edp(intel_dp))
+ intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
+
+ /* Panel Replay on eDP is always using ALPM aux less. */
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
+ intel_de_rmw(dev_priv, ALPM_CTL(cpu_transcoder),
+ ALPM_CTL_ALPM_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ intel_de_rmw(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+ }
/* Disable PSR on Sink */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ intel_psr_get_enable_sink_offset(intel_dp), 0);
- if (intel_dp->psr.psr2_enabled)
+ if (!intel_dp->psr.panel_replay_enabled &&
+ intel_dp->psr.psr2_enabled)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
intel_dp->psr.enabled = false;
@@ -1888,7 +2162,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
return;
mutex_lock(&psr->lock);
@@ -1921,7 +2195,7 @@ void intel_psr_resume(struct intel_dp *intel_dp)
{
struct intel_psr *psr = &intel_dp->psr;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
return;
mutex_lock(&psr->lock);
@@ -1994,6 +2268,7 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
@@ -2013,6 +2288,12 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
crtc_state->psr2_man_track_ctl);
+
+ if (!crtc_state->enable_psr2_su_region_et)
+ return;
+
+ intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ crtc_state->pipe_srcsz_early_tpt);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2051,6 +2332,25 @@ exit:
crtc_state->psr2_man_track_ctl = val;
}
+static u32
+psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
+ bool full_update, bool cursor_in_su_area)
+{
+ int width, height;
+
+ if (!crtc_state->enable_psr2_su_region_et || full_update)
+ return 0;
+
+ if (!cursor_in_su_area)
+ return PIPESRC_WIDTH(0) |
+ PIPESRC_HEIGHT(drm_rect_height(&crtc_state->pipe_src));
+
+ width = drm_rect_width(&crtc_state->psr2_su_area);
+ height = drm_rect_height(&crtc_state->psr2_su_area);
+
+ return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
+}
+
static void clip_area_update(struct drm_rect *overlap_damage_area,
struct drm_rect *damage_area,
struct drm_rect *pipe_src)
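For reference, psr2_pipe_srcsz_early_tpt_calc() above packs "size - 1" into
both PIPE_SRCSZ_ERLY_TPT fields. A stand-alone sketch (not driver code) of
that packing, with REG_GENMASK()/REG_FIELD_PREP() approximated in plain C
and a made-up SU-area size:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)       ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v) (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

#define PIPESRC_WIDTH_MASK  GENMASK(31, 16)
#define PIPESRC_HEIGHT_MASK GENMASK(15, 0)

int main(void)
{
	int width = 1920, height = 1080;	/* hypothetical SU area */
	uint32_t val = FIELD_PREP(PIPESRC_WIDTH_MASK, width - 1) |
		       FIELD_PREP(PIPESRC_HEIGHT_MASK, height - 1);

	printf("PIPE_SRCSZ_ERLY_TPT = 0x%08x\n", val);	/* 0x077f0437 */
	return 0;
}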
@@ -2095,21 +2395,38 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
* cursor fully when cursor is in SU area.
*/
static void
-intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *cursor_state)
+intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ bool *cursor_in_su_area)
{
- struct drm_rect inter;
+ struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
- if (!crtc_state->enable_psr2_su_region_et ||
- !cursor_state->uapi.visible)
+ if (!crtc_state->enable_psr2_su_region_et)
return;
- inter = crtc_state->psr2_su_area;
- if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
- return;
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ struct drm_rect inter;
- clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
- &crtc_state->pipe_src);
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
+ continue;
+
+ if (plane->id != PLANE_CURSOR)
+ continue;
+
+ if (!new_plane_state->uapi.visible)
+ continue;
+
+ inter = crtc_state->psr2_su_area;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ continue;
+
+ clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
+ &crtc_state->pipe_src);
+ *cursor_in_su_area = true;
+ }
}
/*
@@ -2152,10 +2469,9 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane_state *new_plane_state, *old_plane_state,
- *cursor_plane_state = NULL;
+ struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
- bool full_update = false;
+ bool full_update = false, cursor_in_su_area = false;
int i, ret;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -2238,13 +2554,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
-
- /*
- * Cursor plane new state is stored to adjust su area to cover
- * cursor are fully.
- */
- if (plane->id == PLANE_CURSOR)
- cursor_plane_state = new_plane_state;
}
/*
@@ -2273,9 +2582,13 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
- /* Adjust su area to cover cursor fully as necessary */
- if (cursor_plane_state)
- intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
+ /*
+ * Adjust the SU area to fully cover the cursor as necessary (early
+ * transport). This must be done after
+ * drm_atomic_add_affected_planes to ensure a visible cursor is
+ * included in the affected planes even when the cursor itself is
+ * not updated.
+ */
+ intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
intel_psr2_sel_fetch_pipe_alignment(crtc_state);
@@ -2338,6 +2651,9 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, full_update);
+ crtc_state->pipe_srcsz_early_tpt =
+ psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update,
+ cursor_in_su_area);
return 0;
}
@@ -2394,7 +2710,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
- if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
+ if (!crtc_state->has_psr)
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
@@ -2994,6 +3310,13 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
}
}
+/*
+ * The following error bits are common between PSR and Panel Replay:
+ * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
+ * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
+ * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
+ * so this function relies on the PSR definitions for both.
+ */
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -3003,7 +3326,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
DP_PSR_LINK_CRC_ERROR;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
return;
mutex_lock(&psr->lock);
@@ -3017,12 +3340,14 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
goto exit;
}
- if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
+ if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
+ (error_status & errors)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
}
- if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
+ if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
+ !error_status)
drm_dbg_kms(&dev_priv->drm,
"PSR sink internal error, disabling PSR\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
@@ -3042,8 +3367,10 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
- psr_alpm_check(intel_dp);
- psr_capability_changed_check(intel_dp);
+ if (!psr->panel_replay_enabled) {
+ psr_alpm_check(intel_dp);
+ psr_capability_changed_check(intel_dp);
+ }
exit:
mutex_unlock(&psr->lock);
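Given the comment above about PSR and Panel Replay sharing error-bit
layouts, a stand-alone compile-time check of that assumption could look
like the sketch below; the DPCD bit positions are assumed for
illustration rather than quoted from drm_dp.h.

#include <assert.h>

/* assumed DPCD bit positions, for illustration only */
#define DP_PSR_RFB_STORAGE_ERROR		(1 << 1)
#define DP_PANEL_REPLAY_RFB_STORAGE_ERROR	(1 << 1)

static_assert(DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR,
	      "PSR and Panel Replay error status layouts must match");

int main(void) { return 0; }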
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index cde781df84d5..d483c85870e1 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -21,8 +21,13 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
+#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
+ (intel_dp)->psr.source_panel_replay_support)
+
bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
+void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr_post_plane_update(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 8427a736f639..ebc22999572c 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -9,7 +9,7 @@
#include "intel_display_reg_defs.h"
#include "intel_dp_aux_regs.h"
-#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A)
+#define TRANS_EXITLINE(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_EXITLINE_A)
#define EXITLINE_ENABLE REG_BIT(31)
#define EXITLINE_MASK REG_GENMASK(12, 0)
#define EXITLINE_SHIFT 0
@@ -23,7 +23,7 @@
#define HSW_SRD_CTL _MMIO(0x64800)
#define _SRD_CTL_A 0x60800
#define _SRD_CTL_EDP 0x6f800
-#define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A)
+#define EDP_PSR_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_CTL_A)
#define EDP_PSR_ENABLE REG_BIT(31)
#define BDW_PSR_SINGLE_FRAME REG_BIT(30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK REG_BIT(29) /* SW can't modify */
@@ -66,8 +66,8 @@
#define EDP_PSR_IIR _MMIO(0x64838)
#define _PSR_IMR_A 0x60814
#define _PSR_IIR_A 0x60818
-#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A)
-#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
+#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_IMR_A)
+#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_IIR_A)
#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
0 : ((trans) - TRANSCODER_A + 1) * 8)
#define TGL_PSR_MASK REG_GENMASK(2, 0)
@@ -86,7 +86,7 @@
#define HSW_SRD_AUX_CTL _MMIO(0x64810)
#define _SRD_AUX_CTL_A 0x60810
#define _SRD_AUX_CTL_EDP 0x6f810
-#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(tran, _SRD_AUX_CTL_A)
+#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_AUX_CTL_A)
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK DP_AUX_CH_CTL_TIME_OUT_MASK
#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK DP_AUX_CH_CTL_PRECHARGE_2US_MASK
@@ -96,12 +96,12 @@
#define HSW_SRD_AUX_DATA(i) _MMIO(0x64814 + (i) * 4) /* 5 registers */
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
-#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(dev_priv, tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */
#define HSW_SRD_STATUS _MMIO(0x64840)
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
-#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A)
+#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_STATUS_A)
#define EDP_PSR_STATUS_STATE_MASK REG_GENMASK(31, 29)
#define EDP_PSR_STATUS_STATE_IDLE REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 0)
#define EDP_PSR_STATUS_STATE_SRDONACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 1)
@@ -126,14 +126,14 @@
#define HSW_SRD_PERF_CNT _MMIO(0x64844)
#define _SRD_PERF_CNT_A 0x60844
#define _SRD_PERF_CNT_EDP 0x6f844
-#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
+#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_PERF_CNT_A)
#define EDP_PSR_PERF_CNT_MASK REG_GENMASK(23, 0)
/* PSR_MASK on SKL+ */
#define HSW_SRD_DEBUG _MMIO(0x64860)
#define _SRD_DEBUG_A 0x60860
#define _SRD_DEBUG_EDP 0x6f860
-#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A)
+#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_DEBUG_A)
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP REG_BIT(28)
#define EDP_PSR_DEBUG_MASK_LPSP REG_BIT(27)
#define EDP_PSR_DEBUG_MASK_MEMUP REG_BIT(26)
@@ -153,7 +153,7 @@
#define _PSR2_CTL_A 0x60900
#define _PSR2_CTL_EDP 0x6f900
-#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_CTL_A)
#define EDP_PSR2_ENABLE REG_BIT(31)
#define EDP_SU_TRACK_ENABLE REG_BIT(30) /* up to adl-p */
#define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28)
@@ -195,7 +195,7 @@
#define _PSR_EVENT_TRANS_C 0x62848
#define _PSR_EVENT_TRANS_D 0x63848
#define _PSR_EVENT_TRANS_EDP 0x6f848
-#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A)
+#define PSR_EVENT(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_EVENT_TRANS_A)
#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE REG_BIT(17)
#define PSR_EVENT_PSR2_DISABLED REG_BIT(16)
#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN REG_BIT(15)
@@ -215,13 +215,13 @@
#define _PSR2_STATUS_A 0x60940
#define _PSR2_STATUS_EDP 0x6f940
-#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_STATUS_A)
#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28)
#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8)
#define _PSR2_SU_STATUS_A 0x60914
#define _PSR2_SU_STATUS_EDP 0x6f914
-#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4)
+#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(dev_priv, tran, _PSR2_SU_STATUS_A + (index) * 4)
#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
@@ -229,7 +229,7 @@
#define _PSR2_MAN_TRK_CTL_A 0x60910
#define _PSR2_MAN_TRK_CTL_EDP 0x6f910
-#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A)
+#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_MAN_TRK_CTL_A)
#define PSR2_MAN_TRK_CTL_ENABLE REG_BIT(31)
#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(30, 21)
#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
@@ -249,7 +249,7 @@
/* PSR2 Early transport */
#define _PIPE_SRCSZ_ERLY_TPT_A 0x70074
-#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(trans, _PIPE_SRCSZ_ERLY_TPT_A)
+#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(dev_priv, trans, _PIPE_SRCSZ_ERLY_TPT_A)
#define _SEL_FETCH_PLANE_BASE_1_A 0x70890
#define _SEL_FETCH_PLANE_BASE_2_A 0x708B0
@@ -297,7 +297,7 @@
_SEL_FETCH_PLANE_BASE_1_A)
#define _ALPM_CTL_A 0x60950
-#define ALPM_CTL(tran) _MMIO_TRANS2(tran, _ALPM_CTL_A)
+#define ALPM_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL_A)
#define ALPM_CTL_ALPM_ENABLE REG_BIT(31)
#define ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(30)
#define ALPM_CTL_LOBF_ENABLE REG_BIT(29)
@@ -321,7 +321,7 @@
#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
#define _ALPM_CTL2_A 0x60954
-#define ALPM_CTL2(tran) _MMIO_TRANS2(tran, _ALPM_CTL2_A)
+#define ALPM_CTL2(tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL2_A)
#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK REG_GENMASK(28, 24)
#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY(val) REG_FIELD_PREP(ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK, val)
#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK REG_GENMASK(19, 16)
@@ -335,7 +335,7 @@
#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK, val)
#define _PORT_ALPM_CTL_A 0x16fa2c
-#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_CTL_A)
+#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PORT_ALPM_CTL_A)
#define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31)
#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20)
#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val)
@@ -345,12 +345,16 @@
#define PORT_ALPM_CTL_SILENCE_PERIOD(val) REG_FIELD_PREP(PORT_ALPM_CTL_SILENCE_PERIOD_MASK, val)
#define _PORT_ALPM_LFPS_CTL_A 0x16fa30
-#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_LFPS_CTL_A)
+#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PORT_ALPM_LFPS_CTL_A)
#define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31)
#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24)
-#define ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES 5
-#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME(lines) REG_FIELD_PREP(ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK, (lines) - ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES)
-#define ALPM_CTL_AUX_LESS_WAKE_TIME_MASK REG_GENMASK(5, 0)
-#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
+#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN 7
+#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK, (val) - PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN)
+#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(20, 16)
+#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(12, 8)
+#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(4, 0)
+#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK, val)
#endif /* __INTEL_PSR_REGS_H__ */
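A note on the mechanical change running through this header: _MMIO_TRANS2()
now takes the device pointer as an explicit first argument instead of
picking up a dev_priv local from the macro expansion site. A stand-alone
sketch of the idea, with made-up names and offsets:

#include <stdio.h>

struct device_info { unsigned int trans_offsets[4]; };

#define MMIO_TRANS2(dev, tran, reg) \
	((dev)->trans_offsets[(tran)] - (dev)->trans_offsets[0] + (reg))

int main(void)
{
	struct device_info di = {
		.trans_offsets = { 0x60000, 0x61000, 0x62000, 0x63000 },
	};

	/* transcoder B lands 0x1000 above transcoder A */
	printf("0x%x\n", MMIO_TRANS2(&di, 1, 0x60800));	/* 0x61800 */
	return 0;
}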
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index a280448df771..14d5fefc9c5b 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -9,72 +9,72 @@
#include "intel_display_types.h"
#include "intel_quirks.h"
-static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+static void intel_set_quirk(struct intel_display *display, enum intel_quirk_id quirk)
{
- i915->display.quirks.mask |= BIT(quirk);
+ display->quirks.mask |= BIT(quirk);
}
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
-static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+static void quirk_ssc_force_disable(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
- drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
+ intel_set_quirk(display, QUIRK_LVDS_SSC_DISABLE);
+ drm_info(display->drm, "applying lvds SSC disable quirk\n");
}
/*
* A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
* brightness value
*/
-static void quirk_invert_brightness(struct drm_i915_private *i915)
+static void quirk_invert_brightness(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS);
- drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
+ intel_set_quirk(display, QUIRK_INVERT_BRIGHTNESS);
+ drm_info(display->drm, "applying inverted panel brightness quirk\n");
}
/* Some VBT's incorrectly indicate no backlight is present */
-static void quirk_backlight_present(struct drm_i915_private *i915)
+static void quirk_backlight_present(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT);
- drm_info(&i915->drm, "applying backlight present quirk\n");
+ intel_set_quirk(display, QUIRK_BACKLIGHT_PRESENT);
+ drm_info(display->drm, "applying backlight present quirk\n");
}
/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
* which is 300 ms greater than eDP spec T12 min.
*/
-static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+static void quirk_increase_t12_delay(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY);
- drm_info(&i915->drm, "Applying T12 delay quirk\n");
+ intel_set_quirk(display, QUIRK_INCREASE_T12_DELAY);
+ drm_info(display->drm, "Applying T12 delay quirk\n");
}
/*
* GeminiLake NUC HDMI outputs require additional off time
* this allows the onboard retimer to correctly sync to signal
*/
-static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+static void quirk_increase_ddi_disabled_time(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME);
- drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
+ intel_set_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME);
+ drm_info(display->drm, "Applying Increase DDI Disabled quirk\n");
}
-static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
+static void quirk_no_pps_backlight_power_hook(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
- drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
+ intel_set_quirk(display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
+ drm_info(display->drm, "Applying no pps backlight power quirk\n");
}
struct intel_quirk {
int device;
int subsystem_vendor;
int subsystem_device;
- void (*hook)(struct drm_i915_private *i915);
+ void (*hook)(struct intel_display *display);
};
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
- void (*hook)(struct drm_i915_private *i915);
+ void (*hook)(struct intel_display *display);
const struct dmi_system_id (*dmi_id_list)[];
};
@@ -203,9 +203,9 @@ static struct intel_quirk intel_quirks[] = {
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
-void intel_init_quirks(struct drm_i915_private *i915)
+void intel_init_quirks(struct intel_display *display)
{
- struct pci_dev *d = to_pci_dev(i915->drm.dev);
+ struct pci_dev *d = to_pci_dev(display->drm->dev);
int i;
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
@@ -216,15 +216,15 @@ void intel_init_quirks(struct drm_i915_private *i915)
q->subsystem_vendor == PCI_ANY_ID) &&
(d->subsystem_device == q->subsystem_device ||
q->subsystem_device == PCI_ANY_ID))
- q->hook(i915);
+ q->hook(display);
}
for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
- intel_dmi_quirks[i].hook(i915);
+ intel_dmi_quirks[i].hook(display);
}
}
-bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk)
{
- return i915->display.quirks.mask & BIT(quirk);
+ return display->quirks.mask & BIT(quirk);
}
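The conversion above is mechanical, but the mask logic is worth seeing in
isolation. A stand-alone sketch of the intel_set_quirk()/intel_has_quirk()
pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

enum quirk_id { QUIRK_BACKLIGHT_PRESENT, QUIRK_INVERT_BRIGHTNESS };

struct display { unsigned int quirks_mask; };

static void set_quirk(struct display *d, enum quirk_id q)
{
	d->quirks_mask |= 1u << q;
}

static bool has_quirk(const struct display *d, enum quirk_id q)
{
	return d->quirks_mask & (1u << q);
}

int main(void)
{
	struct display d = { 0 };

	set_quirk(&d, QUIRK_INVERT_BRIGHTNESS);
	printf("%d %d\n", has_quirk(&d, QUIRK_BACKLIGHT_PRESENT),
	       has_quirk(&d, QUIRK_INVERT_BRIGHTNESS));	/* 0 1 */
	return 0;
}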
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index 10a4d163149f..151c8f4ae576 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
enum intel_quirk_id {
QUIRK_BACKLIGHT_PRESENT,
@@ -19,7 +19,7 @@ enum intel_quirk_id {
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
};
-void intel_init_quirks(struct drm_i915_private *i915);
-bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk);
+void intel_init_quirks(struct intel_display *display);
+bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk);
#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 5f9e748adc89..d0d712405129 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -193,7 +193,7 @@ to_intel_sdvo_connector(struct drm_connector *connector)
}
#define to_intel_sdvo_connector_state(conn_state) \
- container_of((conn_state), struct intel_sdvo_connector_state, base.base)
+ container_of_const((conn_state), struct intel_sdvo_connector_state, base.base)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo);
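The switch from container_of() to container_of_const() preserves the const
qualifier of the passed-in pointer, which is what lets the cast-away-const
at the atomic_get_property call site further down be dropped (intel_tv.c
gets the same treatment below). A grossly simplified stand-alone sketch of
a const-preserving container_of built with _Generic; the real kernel macro
is generic over the member type:

#include <stddef.h>

struct base { int x; };
struct derived { struct base base; int y; };

/* simplified: hard-wired to "struct base" members, unlike the kernel's */
#define container_of_const(ptr, type, member)				\
	_Generic((ptr),							\
		const struct base *: (const type *)((const char *)(ptr) - \
						    offsetof(type, member)), \
		struct base *: (type *)((char *)(ptr) -			\
					offsetof(type, member)))

int main(void)
{
	struct derived d = { .y = 42 };
	const struct base *cb = &d.base;
	const struct derived *cd = container_of_const(cb, struct derived, base);

	return cd->y == 42 ? 0 : 1;
}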
@@ -1842,8 +1842,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- encoder->audio_disable(encoder, old_crtc_state, conn_state);
-
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1935,8 +1933,6 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
-
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static enum drm_mode_status
@@ -1948,7 +1944,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
- int max_dotclk = i915->max_dotclk_freq;
+ int max_dotclk = i915->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
int clock = mode->clock;
@@ -1956,9 +1952,6 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -2382,7 +2375,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
u64 *val)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
+ const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
int i;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index bc61e736f9b3..e6df1f92def5 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -44,12 +44,14 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
}
}
-void intel_snps_phy_update_psr_power_state(struct drm_i915_private *i915,
- enum phy phy, bool enable)
+void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
+ bool enable)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
- if (!intel_phy_is_snps(i915, phy))
+ if (!intel_encoder_is_snps(encoder))
return;
val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
@@ -63,7 +65,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
@@ -1809,7 +1811,7 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->mpllb_state = *tables[i];
+ crtc_state->dpll_hw_state.mpllb = *tables[i];
return 0;
}
}
@@ -1821,8 +1823,8 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_mpllb_state *pll_state = &crtc_state->mpllb_state;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ const struct intel_mpllb_state *pll_state = &crtc_state->dpll_hw_state.mpllb;
+ enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
@@ -1879,7 +1881,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
void intel_mpllb_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
@@ -1951,7 +1953,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
struct intel_mpllb_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy));
pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy));
@@ -1999,7 +2001,7 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_mpllb_state mpllb_hw_state = {};
- const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
+ const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->dpll_hw_state.mpllb;
struct intel_encoder *encoder;
if (!IS_DG2(i915))
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index 515abf7c5902..bc08b92a7cd9 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -17,8 +17,8 @@ struct intel_mpllb_state;
enum phy;
void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv);
-void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
- enum phy phy, bool enable);
+void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
+ bool enable);
int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index d7b440c8caef..36a253a19c74 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -47,6 +47,7 @@
#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
+#include "intel_sprite_regs.h"
static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite)
{
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_regs.h b/drivers/gpu/drm/i915/display/intel_sprite_regs.h
new file mode 100644
index 000000000000..bb67705652b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite_regs.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_SPRITE_REGS__
+#define __INTEL_SPRITE_REGS__
+
+#include "intel_display_reg_defs.h"
+
+#define _DVSACNTR 0x72180
+#define DVS_ENABLE REG_BIT(31)
+#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30)
+#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27)
+#define DVS_FORMAT_MASK REG_GENMASK(26, 25)
+#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0)
+#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1)
+#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2)
+#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3)
+#define DVS_PIPE_CSC_ENABLE REG_BIT(24)
+#define DVS_SOURCE_KEY REG_BIT(22)
+#define DVS_RGB_ORDER_XBGR REG_BIT(20)
+#define DVS_YUV_FORMAT_BT709 REG_BIT(18)
+#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16)
+#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0)
+#define DVS_YUV_ORDER_UYVY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1)
+#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2)
+#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3)
+#define DVS_ROTATE_180 REG_BIT(15)
+#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14)
+#define DVS_TILED REG_BIT(10)
+#define DVS_DEST_KEY REG_BIT(2)
+#define _DVSALINOFF 0x72184
+#define _DVSASTRIDE 0x72188
+#define _DVSAPOS 0x7218c
+#define DVS_POS_Y_MASK REG_GENMASK(31, 16)
+#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y))
+#define DVS_POS_X_MASK REG_GENMASK(15, 0)
+#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x))
+#define _DVSASIZE 0x72190
+#define DVS_HEIGHT_MASK REG_GENMASK(31, 16)
+#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h))
+#define DVS_WIDTH_MASK REG_GENMASK(15, 0)
+#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w))
+#define _DVSAKEYVAL 0x72194
+#define _DVSAKEYMSK 0x72198
+#define _DVSASURF 0x7219c
+#define DVS_ADDR_MASK REG_GENMASK(31, 12)
+#define _DVSAKEYMAXVAL 0x721a0
+#define _DVSATILEOFF 0x721a4
+#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16)
+#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y))
+#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0)
+#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x))
+#define _DVSASURFLIVE 0x721ac
+#define _DVSAGAMC_G4X 0x721e0 /* g4x */
+#define _DVSASCALE 0x72204
+#define DVS_SCALE_ENABLE REG_BIT(31)
+#define DVS_FILTER_MASK REG_GENMASK(30, 29)
+#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0)
+#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1)
+#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2)
+#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27)
+#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16)
+#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w))
+#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
+#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h))
+#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */
+#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */
+
+#define _DVSBCNTR 0x73180
+#define _DVSBLINOFF 0x73184
+#define _DVSBSTRIDE 0x73188
+#define _DVSBPOS 0x7318c
+#define _DVSBSIZE 0x73190
+#define _DVSBKEYVAL 0x73194
+#define _DVSBKEYMSK 0x73198
+#define _DVSBSURF 0x7319c
+#define _DVSBKEYMAXVAL 0x731a0
+#define _DVSBTILEOFF 0x731a4
+#define _DVSBSURFLIVE 0x731ac
+#define _DVSBGAMC_G4X 0x731e0 /* g4x */
+#define _DVSBSCALE 0x73204
+#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */
+#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */
+
+#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
+#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
+#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
+
+#define _SPRA_CTL 0x70280
+#define SPRITE_ENABLE REG_BIT(31)
+#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30)
+#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28)
+#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25)
+#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0)
+#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1)
+#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2)
+#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3)
+#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4)
+#define SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */
+#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24)
+#define SPRITE_SOURCE_KEY REG_BIT(22)
+#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19)
+#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */
+#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16)
+#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0)
+#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1)
+#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2)
+#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3)
+#define SPRITE_ROTATE_180 REG_BIT(15)
+#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14)
+#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13)
+#define SPRITE_TILED REG_BIT(10)
+#define SPRITE_DEST_KEY REG_BIT(2)
+#define _SPRA_LINOFF 0x70284
+#define _SPRA_STRIDE 0x70288
+#define _SPRA_POS 0x7028c
+#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16)
+#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y))
+#define SPRITE_POS_X_MASK REG_GENMASK(15, 0)
+#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x))
+#define _SPRA_SIZE 0x70290
+#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16)
+#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h))
+#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0)
+#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w))
+#define _SPRA_KEYVAL 0x70294
+#define _SPRA_KEYMSK 0x70298
+#define _SPRA_SURF 0x7029c
+#define SPRITE_ADDR_MASK REG_GENMASK(31, 12)
+#define _SPRA_KEYMAX 0x702a0
+#define _SPRA_TILEOFF 0x702a4
+#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16)
+#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y))
+#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0)
+#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x))
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
+#define _SPRA_SCALE 0x70304
+#define SPRITE_SCALE_ENABLE REG_BIT(31)
+#define SPRITE_FILTER_MASK REG_GENMASK(30, 29)
+#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0)
+#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1)
+#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2)
+#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27)
+#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16)
+#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w))
+#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
+#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h))
+#define _SPRA_GAMC 0x70400
+#define _SPRA_GAMC16 0x70440
+#define _SPRA_GAMC17 0x7044c
+
+#define _SPRB_CTL 0x71280
+#define _SPRB_LINOFF 0x71284
+#define _SPRB_STRIDE 0x71288
+#define _SPRB_POS 0x7128c
+#define _SPRB_SIZE 0x71290
+#define _SPRB_KEYVAL 0x71294
+#define _SPRB_KEYMSK 0x71298
+#define _SPRB_SURF 0x7129c
+#define _SPRB_KEYMAX 0x712a0
+#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
+#define _SPRB_SCALE 0x71304
+#define _SPRB_GAMC 0x71400
+#define _SPRB_GAMC16 0x71440
+#define _SPRB_GAMC17 0x7144c
+
+#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
+#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
+#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
+#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
+#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
+
+#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
+#define SP_ENABLE REG_BIT(31)
+#define SP_PIPE_GAMMA_ENABLE REG_BIT(30)
+#define SP_FORMAT_MASK REG_GENMASK(29, 26)
+#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0)
+#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2)
+#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5)
+#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6)
+#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7)
+#define SP_FORMAT_RGBX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 8)
+#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9)
+#define SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */
+#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */
+#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14)
+#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15)
+#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */
+#define SP_SOURCE_KEY REG_BIT(22)
+#define SP_YUV_FORMAT_BT709 REG_BIT(18)
+#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16)
+#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0)
+#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1)
+#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2)
+#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3)
+#define SP_ROTATE_180 REG_BIT(15)
+#define SP_TILED REG_BIT(10)
+#define SP_MIRROR REG_BIT(8) /* CHV pipe B */
+#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
+#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
+#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
+#define SP_POS_Y_MASK REG_GENMASK(31, 16)
+#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y))
+#define SP_POS_X_MASK REG_GENMASK(15, 0)
+#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x))
+#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
+#define SP_HEIGHT_MASK REG_GENMASK(31, 16)
+#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h))
+#define SP_WIDTH_MASK REG_GENMASK(15, 0)
+#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w))
+#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
+#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
+#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
+#define SP_ADDR_MASK REG_GENMASK(31, 12)
+#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
+#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
+#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16)
+#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y))
+#define SP_OFFSET_X_MASK REG_GENMASK(15, 0)
+#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x))
+#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
+#define SP_CONST_ALPHA_ENABLE REG_BIT(31)
+#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0)
+#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha))
+#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac)
+#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
+#define SP_CONTRAST_MASK REG_GENMASK(26, 18)
+#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */
+#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0)
+#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */
+#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
+#define SP_SH_SIN_MASK REG_GENMASK(26, 16)
+#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */
+#define SP_SH_COS_MASK REG_GENMASK(9, 0)
+#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */
+#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0)
+
+#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
+#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
+#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
+#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
+#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
+#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
+#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
+#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
+#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
+#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
+#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
+#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac)
+#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
+#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
+#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
+
+#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
+#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
+
+#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE)
+#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
+#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
+#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
+
+/*
+ * CHV pipe B sprite CSC
+ *
+ * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
+ * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
+ * |cb| |c6 c7 c8| |cb + cb_ioff| |cb_ooff|
+ */
+#define _MMIO_CHV_SPCSC(plane_id, reg) \
+ _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg))
+
+#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900)
+#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904)
+#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908)
+#define SPCSC_OOFF_MASK REG_GENMASK(26, 16)
+#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, (x) & 0x7ff) /* s11 */
+#define SPCSC_IOFF_MASK REG_GENMASK(10, 0)
+#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */
+
+#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c)
+#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910)
+#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914)
+#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918)
+#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c)
+#define SPCSC_C1_MASK REG_GENMASK(30, 16)
+#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */
+#define SPCSC_C0_MASK REG_GENMASK(14, 0)
+#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */
+
+#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920)
+#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924)
+#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928)
+#define SPCSC_IMAX_MASK REG_GENMASK(26, 16)
+#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */
+#define SPCSC_IMIN_MASK REG_GENMASK(10, 0)
+#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */
+
+#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c)
+#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930)
+#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934)
+#define SPCSC_OMAX_MASK REG_GENMASK(25, 16)
+#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */
+#define SPCSC_OMIN_MASK REG_GENMASK(9, 0)
+#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */
+
+#endif /* __INTEL_SPRITE_REGS__ */
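Since this header is new, one non-obvious bit deserves a note: _VLV_SPR()
flattens (pipe, plane) into a single index because VLV/CHV carry two
sprite planes per pipe, and _PIPE() extrapolates linearly from the two
given register instances. A stand-alone sketch using the _SPACNTR/_SPBCNTR
offsets from above (VLV_DISPLAY_BASE omitted, enum values illustrative):

#include <stdio.h>

enum { PLANE_SPRITE0 = 1, PLANE_SPRITE1 = 2 };

/* linear extrapolation from two instances, as _PIPE() does */
#define PIPE_OFF(idx, a, b)	((a) + (idx) * ((b) - (a)))
#define VLV_SPR(pipe, plane, a, b) \
	PIPE_OFF((pipe) * 2 + (plane) - PLANE_SPRITE0, (a), (b))

int main(void)
{
	printf("0x%x\n", VLV_SPR(0, PLANE_SPRITE1, 0x72180, 0x72280)); /* 0x72280 */
	printf("0x%x\n", VLV_SPR(1, PLANE_SPRITE0, 0x72180, 0x72280)); /* 0x72380 */
	return 0;
}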
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 6b374d481cd9..9887967b2ca5 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -100,11 +100,9 @@ static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
enum tc_port_mode mode)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- return intel_phy_is_tc(i915, phy) && tc->mode == mode;
+ return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
}
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
@@ -124,11 +122,9 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- return intel_phy_is_tc(i915, phy) && !tc->legacy_port;
+ return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
}
/*
@@ -254,8 +250,7 @@ assert_tc_cold_blocked(struct intel_tc_port *tc)
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
- enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
@@ -302,7 +297,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
intel_wakeref_t wakeref;
u32 val, pin_assignment;
@@ -375,9 +370,8 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
+ if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
return 4;
assert_tc_cold_blocked(tc);
@@ -458,9 +452,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
- enum port port = tc->dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
/*
* Each Modular FIA instance houses 2 TC ports. In SOC that has more
@@ -812,7 +804,7 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
- enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
u32 val;
assert_display_core_power_enabled(tc);
@@ -1635,10 +1627,7 @@ static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (!intel_phy_is_tc(i915, phy))
+ if (!intel_encoder_is_tc(&dig_port->base))
return false;
return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
@@ -1740,11 +1729,9 @@ bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- if (!intel_phy_is_tc(i915, phy))
+ if (!intel_encoder_is_tc(&dig_port->base))
return;
cancel_delayed_work(&tc->link_reset_work);
@@ -1861,7 +1848,7 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc;
enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
+ enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 2b77d399f1a1..9df0f1263913 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -885,7 +885,8 @@ struct intel_tv_connector_state {
bool bypass_vfilter;
};
-#define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base)
+#define to_intel_tv_connector_state(conn_state) \
+ container_of_const((conn_state), struct intel_tv_connector_state, base)
static struct drm_connector_state *
intel_tv_connector_duplicate_state(struct drm_connector *connector)
@@ -961,16 +962,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
{
struct drm_i915_private *i915 = to_i915(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
- int max_dotclk = i915->max_dotclk_freq;
+ int max_dotclk = i915->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index a9f44abfc9fc..3815aa21d19f 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -485,6 +485,7 @@ struct child_device_config {
u8 hdmi_iboost_level:4; /* 196+ */
u8 dp_max_link_rate:3; /* 216+ */
u8 dp_max_link_rate_reserved:5; /* 216+ */
+ u8 efp_index; /* 256+ */
} __packed;
struct bdb_general_definitions {
@@ -602,22 +603,22 @@ struct bdb_driver_features {
u8 custom_vbt_version; /* 155+ */
/* Driver Feature Flags */
- u16 rmpm_enabled:1; /* 165+ */
- u16 s2ddt_enabled:1; /* 165+ */
- u16 dpst_enabled:1; /* 165-227 */
- u16 bltclt_enabled:1; /* 165+ */
- u16 adb_enabled:1; /* 165-227 */
- u16 drrs_enabled:1; /* 165-227 */
- u16 grs_enabled:1; /* 165+ */
- u16 gpmt_enabled:1; /* 165+ */
- u16 tbt_enabled:1; /* 165+ */
+ u16 rmpm_enabled:1; /* 159+ */
+ u16 s2ddt_enabled:1; /* 159+ */
+ u16 dpst_enabled:1; /* 159-227 */
+ u16 bltclt_enabled:1; /* 159+ */
+ u16 adb_enabled:1; /* 159-227 */
+ u16 drrs_enabled:1; /* 159-227 */
+ u16 grs_enabled:1; /* 159+ */
+ u16 gpmt_enabled:1; /* 159+ */
+ u16 tbt_enabled:1; /* 159+ */
u16 psr_enabled:1; /* 165-227 */
u16 ips_enabled:1; /* 165+ */
- u16 dpfs_enabled:1; /* 165+ */
+ u16 dfps_enabled:1; /* 165+ */
u16 dmrrs_enabled:1; /* 174-227 */
u16 adt_enabled:1; /* ???-228 */
u16 hpd_wake:1; /* 201-240 */
- u16 pc_feature_valid:1;
+ u16 pc_feature_valid:1; /* 159+ */
} __packed;
/*
@@ -880,11 +881,12 @@ struct bdb_lvds_lfp_data_tail {
struct lfp_backlight_data_entry {
u8 type:2;
u8 active_low_pwm:1;
- u8 obsolete1:5;
+ u8 i2c_pin:3; /* obsolete since ? */
+ u8 i2c_speed:2; /* obsolete since ? */
u16 pwm_freq_hz;
u8 min_brightness; /* ???-233 */
- u8 obsolete2;
- u8 obsolete3;
+ u8 i2c_address; /* obsolete since ? */
+ u8 i2c_command; /* obsolete since ? */
} __packed;
struct lfp_backlight_control_method {
@@ -905,8 +907,8 @@ struct lfp_brightness_level {
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];
- u8 level[16]; /* ???-233 */
- struct lfp_backlight_control_method backlight_control[16];
+ u8 level[16]; /* 162-233 */
+ struct lfp_backlight_control_method backlight_control[16]; /* 191+ */
struct lfp_brightness_level brightness_level[16]; /* 234+ */
struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
u8 brightness_precision_bits[16]; /* 236+ */
@@ -917,7 +919,7 @@ struct bdb_lfp_backlight_data {
* Block 44 - LFP Power Conservation Features Block
*/
struct lfp_power_features {
- u8 reserved1:1;
+ u8 dpst_support:1; /* ???-159 */
u8 power_conservation_pref:3;
u8 reserved2:1;
u8 lace_enabled_status:1; /* 210+ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5d905f932cb4..894ee97b3e1b 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -9,6 +9,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vrr.h"
+#include "intel_dp.h"
bool intel_vrr_is_capable(struct intel_connector *connector)
{
@@ -113,10 +114,18 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that during VRR toggle/push/etc.
+ */
+ if (crtc_state->bigjoiner_pipes)
+ return;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;
@@ -165,6 +174,14 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (crtc_state->uapi.vrr_enabled) {
crtc_state->vrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+ if (intel_dp_as_sdp_supported(intel_dp)) {
+ crtc_state->vrr.vsync_start =
+ (crtc_state->hw.adjusted_mode.crtc_vtotal -
+ crtc_state->hw.adjusted_mode.vsync_start);
+ crtc_state->vrr.vsync_end =
+ (crtc_state->hw.adjusted_mode.crtc_vtotal -
+ crtc_state->hw.adjusted_mode.vsync_end);
+ }
}
}
@@ -187,10 +204,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
- * TRANS_SET_CONTEXT_LATENCY with VRR enabled
- * requires this chicken bit on ADL/DG2.
+ * This bit seems to have two meanings depending on the platform:
+ * TGL: generate VRR "safe window" for DSB vblank waits
+ * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
- if (DISPLAY_VER(dev_priv) == 13)
+ if (IS_DISPLAY_VER(dev_priv, 12, 13))
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
@@ -239,6 +257,12 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
return;
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+
+ if (HAS_AS_SDP(dev_priv))
+ intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder),
+ VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
+ VRR_VSYNC_START(crtc_state->vrr.vsync_start));
+
intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
@@ -257,13 +281,16 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+
+ if (HAS_AS_SDP(dev_priv))
+ intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder), 0);
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 trans_vrr_ctl;
+ u32 trans_vrr_ctl, trans_vrr_vsync;
trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
@@ -283,6 +310,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
}
- if (crtc_state->vrr.enable)
+ if (crtc_state->vrr.enable) {
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+
+ if (HAS_AS_SDP(dev_priv)) {
+ trans_vrr_vsync =
+ intel_de_read(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder));
+ crtc_state->vrr.vsync_start =
+ REG_FIELD_GET(VRR_VSYNC_START_MASK, trans_vrr_vsync);
+ crtc_state->vrr.vsync_end =
+ REG_FIELD_GET(VRR_VSYNC_END_MASK, trans_vrr_vsync);
+ }
+ }
}
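
[Editor's note: the sign convention in the intel_vrr.c hunks above is worth spelling out. As the driver computes them here, the TRANS_VRR_VSYNC values are the vsync edges expressed as offsets back from crtc_vtotal, so intel_vrr_compute_config() stores vtotal minus the mode's vsync_start/end, and intel_vrr_get_config() recovers the raw fields with REG_FIELD_GET(). A minimal standalone sketch of the arithmetic, with made-up timings:

#include <stdio.h>

/* Made-up 4K-ish timings; only the subtraction matters here. */
int main(void)
{
	int crtc_vtotal = 2250, vsync_start = 2166, vsync_end = 2172;

	/* what intel_vrr_compute_config() stores for TRANS_VRR_VSYNC */
	int vrr_vsync_start = crtc_vtotal - vsync_start; /* 84 */
	int vrr_vsync_end = crtc_vtotal - vsync_end;     /* 78 */

	printf("VRR_VSYNC_START=%d VRR_VSYNC_END=%d\n",
	       vrr_vsync_start, vrr_vsync_end);
	return 0;
}

]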
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 8a934bada624..baa601d27815 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -213,10 +213,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* The pipe scaler does not use all the bits of PIPESRC, at least
* on the earlier platforms. So even when we're scaling a plane
* the *pipe* source size must not be too large. For simplicity
- * we assume the limits match the scaler source size limits. Might
- * not be 100% accurate on all platforms, but good enough for now.
+ * we assume the limits match the scaler destination size limits.
+ * Might not be 100% accurate on all platforms, but good enough for
+ * now.
*/
- if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
+ if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: pipe src size %ux%u "
"is out of scaler range\n",
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index e941e2e4fd14..860574d04f88 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -2295,6 +2295,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (HAS_4TILE(i915))
caps |= INTEL_PLANE_CAP_TILING_4;
+ if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
+ return caps;
+
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
if (DISPLAY_VER(i915) >= 12)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index c6b9be80d83c..7c6187b4479f 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -6,18 +6,19 @@
#include <drm/drm_blend.h>
#include "i915_drv.h"
-#include "i915_fixed.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
+#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
+#include "intel_fixed.h"
#include "intel_pcode.h"
#include "intel_wm.h"
#include "skl_watermark.h"
@@ -69,7 +70,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
return DISPLAY_VER(i915) == 9;
}
-static bool
+bool
intel_has_sagv(struct drm_i915_private *i915)
{
return HAS_SAGV(i915) &&
@@ -2601,10 +2602,17 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- if (HAS_MBUS_JOINING(i915))
+ if (HAS_MBUS_JOINING(i915)) {
new_dbuf_state->joined_mbus =
adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ ret = intel_cdclk_state_set_joined_mbus(state, new_dbuf_state->joined_mbus);
+ if (ret)
+ return ret;
+ }
+ }
+
for_each_intel_crtc(&i915->drm, crtc) {
enum pipe pipe = crtc->pipe;
@@ -2628,13 +2636,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (ret)
return ret;
- if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- /* TODO: Implement vblank synchronized MBUS joining changes */
- ret = intel_modeset_all_pipes_late(state, "MBUS joining change");
- if (ret)
- return ret;
- }
-
drm_dbg_kms(&i915->drm,
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
@@ -3057,6 +3058,8 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
if (HAS_MBUS_JOINING(i915))
dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+ dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw);
+
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -3530,85 +3533,6 @@ int intel_dbuf_init(struct drm_i915_private *i915)
return 0;
}
-/*
- * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before
- * update the request state of all DBUS slices.
- */
-static void update_mbus_pre_enable(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- u32 mbus_ctl, dbuf_min_tracker_val;
- enum dbuf_slice slice;
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
-
- if (!HAS_MBUS_JOINING(i915))
- return;
-
- /*
- * TODO: Implement vblank synchronized MBUS joining changes.
- * Must be properly coordinated with dbuf reprogramming.
- */
- if (dbuf_state->joined_mbus) {
- mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
- } else {
- mbus_ctl = MBUS_HASHING_MODE_2x2 |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
- }
-
- intel_de_rmw(i915, MBUS_CTL,
- MBUS_HASHING_MODE_MASK | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
-
- for_each_dbuf_slice(i915, slice)
- intel_de_rmw(i915, DBUF_CTL_S(slice),
- DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
- dbuf_min_tracker_val);
-}
-
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
- new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- update_mbus_pre_enable(state);
- gen9_dbuf_slices_update(i915,
- old_dbuf_state->enabled_slices |
- new_dbuf_state->enabled_slices);
-}
-
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
- new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- gen9_dbuf_slices_update(i915,
- new_dbuf_state->enabled_slices);
-}
-
static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
{
switch (pipe) {
@@ -3628,14 +3552,12 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
return false;
}
-void intel_mbus_dbox_update(struct intel_atomic_state *state)
+static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc_state *new_crtc_state;
const struct intel_crtc *crtc;
u32 val = 0;
- int i;
if (DISPLAY_VER(i915) < 11)
return;
@@ -3679,12 +3601,9 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state)
val |= MBUS_DBOX_B_CREDIT(8);
}
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) {
u32 pipe_val = val;
- if (!new_crtc_state->hw.active)
- continue;
-
if (DISPLAY_VER(i915) >= 14) {
if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
new_dbuf_state->active_pipes))
@@ -3697,6 +3616,217 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state)
}
}
+int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
+ int ratio)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(dbuf_state))
+ return PTR_ERR(dbuf_state);
+
+ dbuf_state->mdclk_cdclk_ratio = ratio;
+
+ return intel_atomic_lock_global_state(&dbuf_state->base);
+}
+
+void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+ int ratio, bool joined_mbus)
+{
+ enum dbuf_slice slice;
+
+ if (!HAS_MBUS_JOINING(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 20)
+ intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+ MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
+
+ if (joined_mbus)
+ ratio *= 2;
+
+ drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
+ ratio, str_yes_no(joined_mbus));
+
+ for_each_dbuf_slice(i915, slice)
+ intel_de_rmw(i915, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
+}
+
+static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ int mdclk_cdclk_ratio;
+
+ if (intel_cdclk_is_decreasing_later(state)) {
+ /* cdclk/mdclk will be changed later by intel_set_cdclk_post_plane_update() */
+ mdclk_cdclk_ratio = old_dbuf_state->mdclk_cdclk_ratio;
+ } else {
+ /* cdclk/mdclk already changed by intel_set_cdclk_pre_plane_update() */
+ mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
+ }
+
+ intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+ new_dbuf_state->joined_mbus);
+}
+
+static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
+ const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+
+ drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
+ drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+
+ crtc = intel_crtc_for_pipe(i915, pipe);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state))
+ return pipe;
+ else
+ return INVALID_PIPE;
+}
+
+static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
+ enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ u32 mbus_ctl;
+
+ drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus),
+ pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
+
+ if (new_dbuf_state->joined_mbus)
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
+ else
+ mbus_ctl = MBUS_HASHING_MODE_2x2;
+
+ if (pipe != INVALID_PIPE)
+ mbus_ctl |= MBUS_JOIN_PIPE_SELECT(pipe);
+ else
+ mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
+
+ intel_de_rmw(i915, MBUS_CTL,
+ MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+}
+
+void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
+{
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state)
+ return;
+
+ if (!old_dbuf_state->joined_mbus && new_dbuf_state->joined_mbus) {
+ enum pipe pipe = intel_mbus_joined_pipe(state, new_dbuf_state);
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mbus_join_update(state, pipe);
+ intel_mbus_dbox_update(state);
+ intel_dbuf_mdclk_min_tracker_update(state);
+ }
+}
+
+void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state)
+ return;
+
+ if (old_dbuf_state->joined_mbus && !new_dbuf_state->joined_mbus) {
+ enum pipe pipe = intel_mbus_joined_pipe(state, old_dbuf_state);
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mdclk_min_tracker_update(state);
+ intel_mbus_dbox_update(state);
+ intel_dbuf_mbus_join_update(state, pipe);
+
+ if (pipe != INVALID_PIPE) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+
+ intel_crtc_wait_for_next_vblank(crtc);
+ }
+ } else if (old_dbuf_state->joined_mbus == new_dbuf_state->joined_mbus &&
+ old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mdclk_min_tracker_update(state);
+ intel_mbus_dbox_update(state);
+ }
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ u8 old_slices, new_slices;
+
+ if (!new_dbuf_state)
+ return;
+
+ old_slices = old_dbuf_state->enabled_slices;
+ new_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+
+ if (old_slices == new_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(i915, new_slices);
+}
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ u8 old_slices, new_slices;
+
+ if (!new_dbuf_state)
+ return;
+
+ old_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+ new_slices = new_dbuf_state->enabled_slices;
+
+ if (old_slices == new_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(i915, new_slices);
+}
+
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = m->private;
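
[Editor's note: the sequencing encoded in the new pre/post-DDB hooks above is the core of this change — joining MBUS completes before the DDB is redistributed, un-joining waits until after, with the dbuf min-tracker and DBOX credit updates ordered accordingly and a vblank wait on the surviving pipe when un-joining. A standalone sketch of that decision logic (helper and strings are mine, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* The join/un-join side of the transition straddles the DDB
 * reallocation so both configurations stay valid throughout. */
static void mbus_transition(bool old_joined, bool new_joined)
{
	if (!old_joined && new_joined)
		puts("pre-DDB: join MBUS -> DBOX credits -> dbuf min-tracker");
	else if (old_joined && !new_joined)
		puts("post-DDB: dbuf min-tracker -> DBOX credits -> un-join MBUS -> vblank wait");
}

int main(void)
{
	mbus_transition(false, true);  /* joining */
	mbus_transition(true, false);  /* un-joining */
	return 0;
}

]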
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index e3d1d74a7b17..91f92c0e706e 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -25,6 +25,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
bool intel_can_enable_sagv(struct drm_i915_private *i915,
const struct intel_bw_state *bw_state);
+bool intel_has_sagv(struct drm_i915_private *i915);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
const struct skl_ddb_entry *entry);
@@ -58,22 +59,31 @@ struct intel_dbuf_state {
u8 slices[I915_MAX_PIPES];
u8 enabled_slices;
u8 active_pipes;
+ u8 mdclk_cdclk_ratio;
bool joined_mbus;
};
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
-#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define to_intel_dbuf_state(global_state) \
+ container_of_const((global_state), struct intel_dbuf_state, base)
+
#define intel_atomic_get_old_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
#define intel_atomic_get_new_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
int intel_dbuf_init(struct drm_i915_private *i915);
+int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
+ int ratio);
+
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_mbus_dbox_update(struct intel_atomic_state *state);
+void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+ int ratio, bool joined_mbus);
+void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
+void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
#endif /* __SKL_WATERMARK_H__ */
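
[Editor's note: the to_intel_dbuf_state() change swaps container_of() for container_of_const(), which preserves the const qualifier of its argument, so the const pointers returned by intel_atomic_get_old/new_global_obj_state() no longer silently drop const. The kernel helper is roughly (paraphrasing include/linux/container_of.h):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* _Generic yields a const-qualified container pointer
 * when the member pointer itself is const. */
#define container_of_const(ptr, type, member)				\
	_Generic(ptr,							\
		const typeof(*(ptr)) *: ((const type *)container_of(ptr, type, member)), \
		default: ((type *)container_of(ptr, type, member))	\
	)

]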
diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
index 20b30c9a6613..269163fa3350 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
@@ -32,14 +32,16 @@
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
-#define MBUS_CTL _MMIO(0x4438C)
-#define MBUS_JOIN REG_BIT(31)
-#define MBUS_HASHING_MODE_MASK REG_BIT(30)
-#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
-#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
-#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
-#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
-#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+#define MBUS_CTL _MMIO(0x4438C)
+#define MBUS_JOIN REG_BIT(31)
+#define MBUS_HASHING_MODE_MASK REG_BIT(30)
+#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
+#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
+#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
+#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
+#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13)
+#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
/* Watermark register definitions for SKL */
#define _CUR_WM_A_0 0x70140
diff --git a/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h b/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h
new file mode 100644
index 000000000000..2b83f334b1ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __VLV_DPIO_PHY_REGS_H__
+#define __VLV_DPIO_PHY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _VLV_CMN(dw) (0x8100 + (dw) * 4)
+#define _CHV_CMN(cl, dw) (0x8100 - (cl) * 0x80 + (dw) * 4)
+#define _VLV_PLL(ch, dw) (0x8000 + (ch) * 0x20 + (dw) * 4) /* dw 0-7,16-23 */
+#define _CHV_PLL(ch, dw) (0x8000 + (ch) * 0x180 + (dw) * 4)
+#define _VLV_REF(dw) (0x80a0 + ((dw) - 8) * 4) /* dw 8-15 */
+#define _VLV_PCS(ch, spline, dw) (0x200 + (ch) * 0x2400 + (spline) * 0x200 + (dw) * 4)
+#define _VLV_PCS_GRP(ch, dw) (0x8200 + (ch) * 0x200 + (dw) * 4)
+#define _VLV_PCS_BCAST(dw) (0xc000 + (dw) * 4)
+#define _VLV_TX(ch, lane, dw) (0x80 + (ch) * 0x2400 + (lane) * 0x200 + (dw) * 4)
+#define _VLV_TX_GRP(ch, dw) (0x8280 + (ch) * 0x200 + (dw) * 4)
+#define _VLV_TX_BCAST(dw) (0xc080 + (dw) * 4)
+
+/*
+ * Per pipe/PLL DPIO regs
+ */
+#define VLV_PLL_DW3(ch) _VLV_PLL((ch), 3)
+#define DPIO_S1_DIV_MASK REG_GENMASK(30, 28)
+#define DPIO_S1_DIV(s1) REG_FIELD_PREP(DPIO_S1_DIV_MASK, (s1))
+#define DPIO_S1_DIV_DAC 0 /* 10, DAC 25-225M rate */
+#define DPIO_S1_DIV_HDMIDP 1 /* 5, DAC 225-400M rate */
+#define DPIO_S1_DIV_LVDS1 2 /* 14 */
+#define DPIO_S1_DIV_LVDS2 3 /* 7 */
+#define DPIO_K_DIV_MASK REG_GENMASK(27, 24)
+#define DPIO_K_DIV(k) REG_FIELD_PREP(DPIO_K_DIV_MASK, (k))
+#define DPIO_P1_DIV_MASK REG_GENMASK(23, 21)
+#define DPIO_P1_DIV(p1) REG_FIELD_PREP(DPIO_P1_DIV_MASK, (p1))
+#define DPIO_P2_DIV_MASK REG_GENMASK(20, 16)
+#define DPIO_P2_DIV(p2) REG_FIELD_PREP(DPIO_P2_DIV_MASK, (p2))
+#define DPIO_N_DIV_MASK REG_GENMASK(15, 12)
+#define DPIO_N_DIV(n) REG_FIELD_PREP(DPIO_N_DIV_MASK, (n))
+#define DPIO_ENABLE_CALIBRATION REG_BIT(11)
+#define DPIO_M1_DIV_MASK REG_GENMASK(10, 8)
+#define DPIO_M1_DIV(m1) REG_FIELD_PREP(DPIO_M1_DIV_MASK, (m1))
+#define DPIO_M2_DIV_MASK REG_GENMASK(7, 0)
+#define DPIO_M2_DIV(m2) REG_FIELD_PREP(DPIO_M2_DIV_MASK, (m2))
+
+#define VLV_PLL_DW5(ch) _VLV_PLL((ch), 5)
+#define DPIO_REFSEL_OVERRIDE REG_BIT(27)
+#define DPIO_PLL_MODESEL_MASK REG_GENMASK(26, 24)
+#define DPIO_BIAS_CURRENT_CTL_MASK REG_GENMASK(22, 20) /* always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_MASK REG_GENMASK(17, 16)
+#define DPIO_DRIVER_CTL_MASK REG_GENMASK(15, 12) /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_MASK REG_GENMASK(11, 8) /* always set to 0x5 */
+
+#define VLV_PLL_DW7(ch) _VLV_PLL((ch), 7)
+
+#define VLV_PLL_DW16(ch) _VLV_PLL((ch), 16)
+
+#define VLV_PLL_DW17(ch) _VLV_PLL((ch), 17)
+
+#define VLV_PLL_DW18(ch) _VLV_PLL((ch), 18)
+
+#define VLV_PLL_DW19(ch) _VLV_PLL((ch), 19)
+
+#define VLV_REF_DW11 _VLV_REF(11)
+
+#define VLV_CMN_DW0 _VLV_CMN(0)
+
+/*
+ * Per DDI channel DPIO regs
+ */
+#define VLV_PCS_DW0_GRP(ch) _VLV_PCS_GRP((ch), 0)
+#define VLV_PCS01_DW0(ch) _VLV_PCS((ch), 0, 0)
+#define VLV_PCS23_DW0(ch) _VLV_PCS((ch), 1, 0)
+#define DPIO_PCS_TX_LANE2_RESET REG_BIT(16)
+#define DPIO_PCS_TX_LANE1_RESET REG_BIT(7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 REG_BIT(4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 REG_BIT(3)
+
+#define VLV_PCS_DW1_GRP(ch) _VLV_PCS_GRP((ch), 1)
+#define VLV_PCS01_DW1(ch) _VLV_PCS((ch), 0, 1)
+#define VLV_PCS23_DW1(ch) _VLV_PCS((ch), 1, 1)
+#define CHV_PCS_REQ_SOFTRESET_EN REG_BIT(23)
+#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN REG_BIT(22)
+#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN REG_BIT(21)
+#define DPIO_PCS_CLK_DATAWIDTH_MASK REG_GENMASK(7, 6)
+#define DPIO_PCS_CLK_DATAWIDTH_8_10 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 1)
+#define DPIO_PCS_CLK_DATAWIDTH_16_20 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 2)
+#define DPIO_PCS_CLK_DATAWIDTH_32_40 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 3)
+#define DPIO_PCS_CLK_SOFT_RESET REG_BIT(5)
+
+#define VLV_PCS_DW8_GRP(ch) _VLV_PCS_GRP((ch), 8)
+#define VLV_PCS01_DW8(ch) _VLV_PCS((ch), 0, 8)
+#define VLV_PCS23_DW8(ch) _VLV_PCS((ch), 1, 8)
+#define DPIO_PCS_USEDCLKCHANNEL REG_BIT(21)
+#define DPIO_PCS_USEDCLKCHANNEL_OVRRIDE REG_BIT(20)
+
+#define VLV_PCS_DW9_GRP(ch) _VLV_PCS_GRP((ch), 9)
+#define VLV_PCS01_DW9(ch) _VLV_PCS((ch), 0, 9)
+#define VLV_PCS23_DW9(ch) _VLV_PCS((ch), 1, 9)
+#define DPIO_PCS_TX2MARGIN_MASK REG_GENMASK(15, 13)
+#define DPIO_PCS_TX2MARGIN_000 REG_FIELD_PREP(DPIO_PCS_TX2MARGIN_MASK, 0)
+#define DPIO_PCS_TX2MARGIN_101 REG_FIELD_PREP(DPIO_PCS_TX2MARGIN_MASK, 1)
+#define DPIO_PCS_TX1MARGIN_MASK REG_GENMASK(12, 10)
+#define DPIO_PCS_TX1MARGIN_000 REG_FIELD_PREP(DPIO_PCS_TX1MARGIN_MASK, 0)
+#define DPIO_PCS_TX1MARGIN_101 REG_FIELD_PREP(DPIO_PCS_TX1MARGIN_MASK, 1)
+
+#define VLV_PCS_DW10_GRP(ch) _VLV_PCS_GRP((ch), 10)
+#define VLV_PCS01_DW10(ch) _VLV_PCS((ch), 0, 10)
+#define VLV_PCS23_DW10(ch) _VLV_PCS((ch), 1, 10)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 REG_BIT(31)
+#define DPIO_PCS_SWING_CALC_TX0_TX2 REG_BIT(30)
+#define DPIO_PCS_TX2DEEMP_MASK REG_GENMASK(27, 24)
+#define DPIO_PCS_TX2DEEMP_9P5 REG_FIELD_PREP(DPIO_PCS_TX2DEEMP_MASK, 0)
+#define DPIO_PCS_TX2DEEMP_6P0 REG_FIELD_PREP(DPIO_PCS_TX2DEEMP_MASK, 2)
+#define DPIO_PCS_TX1DEEMP_MASK REG_GENMASK(19, 16)
+#define DPIO_PCS_TX1DEEMP_9P5 REG_FIELD_PREP(DPIO_PCS_TX1DEEMP_MASK, 0)
+#define DPIO_PCS_TX1DEEMP_6P0 REG_FIELD_PREP(DPIO_PCS_TX1DEEMP_MASK, 2)
+
+#define VLV_PCS_DW11_GRP(ch) _VLV_PCS_GRP((ch), 11)
+#define VLV_PCS01_DW11(ch) _VLV_PCS((ch), 0, 11)
+#define VLV_PCS23_DW11(ch) _VLV_PCS((ch), 1, 11)
+#define DPIO_TX2_STAGGER_MASK_MASK REG_GENMASK(28, 24)
+#define DPIO_TX2_STAGGER_MASK(x) REG_FIELD_PREP(DPIO_TX2_STAGGER_MASK_MASK, (x))
+#define DPIO_LANEDESKEW_STRAP_OVRD REG_BIT(3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER REG_BIT(1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER REG_BIT(0)
+
+#define VLV_PCS_DW12_GRP(ch) _VLV_PCS_GRP((ch), 12)
+#define VLV_PCS01_DW12(ch) _VLV_PCS((ch), 0, 12)
+#define VLV_PCS23_DW12(ch) _VLV_PCS((ch), 1, 12)
+#define DPIO_TX2_STAGGER_MULT_MASK REG_GENMASK(22, 20)
+#define DPIO_TX2_STAGGER_MULT(x) REG_FIELD_PREP(DPIO_TX2_STAGGER_MULT_MASK, (x))
+#define DPIO_TX1_STAGGER_MULT_MASK REG_GENMASK(18, 16)
+#define DPIO_TX1_STAGGER_MULT(x) REG_FIELD_PREP(DPIO_TX1_STAGGER_MULT_MASK, (x))
+#define DPIO_TX1_STAGGER_MASK_MASK REG_GENMASK(12, 8)
+#define DPIO_TX1_STAGGER_MASK(x) REG_FIELD_PREP(DPIO_TX1_STAGGER_MASK_MASK, (x))
+#define DPIO_LANESTAGGER_STRAP_OVRD REG_BIT(6)
+#define DPIO_LANESTAGGER_STRAP_MASK REG_GENMASK(4, 0)
+#define DPIO_LANESTAGGER_STRAP(x) REG_FIELD_PREP(DPIO_LANESTAGGER_STRAP_MASK, (x))
+
+#define VLV_PCS_DW14_GRP(ch) _VLV_PCS_GRP((ch), 14)
+#define VLV_PCS01_DW14(ch) _VLV_PCS((ch), 0, 14)
+#define VLV_PCS23_DW14(ch) _VLV_PCS((ch), 1, 14)
+
+#define VLV_PCS_DW17_BCAST _VLV_PCS_BCAST(17)
+#define VLV_PCS_DW17_GRP(ch) _VLV_PCS_GRP((ch), 17)
+#define VLV_PCS01_DW17(ch) _VLV_PCS((ch), 0, 17)
+#define VLV_PCS23_DW17(ch) _VLV_PCS((ch), 1, 17)
+
+#define VLV_PCS_DW23_GRP(ch) _VLV_PCS_GRP((ch), 23)
+#define VLV_PCS01_DW23(ch) _VLV_PCS((ch), 0, 23)
+#define VLV_PCS23_DW23(ch) _VLV_PCS((ch), 1, 23)
+
+#define VLV_TX_DW2_GRP(ch) _VLV_TX_GRP((ch), 2)
+#define VLV_TX_DW2(ch, lane) _VLV_TX((ch), (lane), 2)
+#define DPIO_SWING_MARGIN000_MASK REG_GENMASK(23, 16)
+#define DPIO_SWING_MARGIN000(x) REG_FIELD_PREP(DPIO_SWING_MARGIN000_MASK, (x))
+#define DPIO_UNIQ_TRANS_SCALE_MASK REG_GENMASK(15, 8)
+#define DPIO_UNIQ_TRANS_SCALE(x) REG_FIELD_PREP(DPIO_UNIQ_TRANS_SCALE_MASK, (x))
+
+#define VLV_TX_DW3_GRP(ch) _VLV_TX_GRP((ch), 3)
+#define VLV_TX_DW3(ch, lane) _VLV_TX((ch), (lane), 3)
+/* The following bit is for the CHV PHY */
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN REG_BIT(27)
+#define DPIO_SWING_MARGIN101_MASK REG_GENMASK(23, 16)
+#define DPIO_SWING_MARGIN101(x) REG_FIELD_PREP(DPIO_SWING_MARGIN101_MASK, (x))
+
+#define VLV_TX_DW4_GRP(ch) _VLV_TX_GRP((ch), 4)
+#define VLV_TX_DW4(ch, lane) _VLV_TX((ch), (lane), 4)
+#define DPIO_SWING_DEEMPH9P5_MASK REG_GENMASK(31, 24)
+#define DPIO_SWING_DEEMPH9P5(x) REG_FIELD_PREP(DPIO_SWING_DEEMPH9P5_MASK, (x))
+#define DPIO_SWING_DEEMPH6P0_MASK REG_GENMASK(23, 16)
+#define DPIO_SWING_DEEMPH6P0(x) REG_FIELD_PREP(DPIO_SWING_DEEMPH6P0_MASK, (x))
+
+#define VLV_TX_DW5_GRP(ch) _VLV_TX_GRP((ch), 5)
+#define VLV_TX_DW5(ch, lane) _VLV_TX((ch), (lane), 5)
+#define DPIO_TX_OCALINIT_EN REG_BIT(31)
+
+#define VLV_TX_DW11_GRP(ch) _VLV_TX_GRP((ch), 11)
+#define VLV_TX_DW11(ch, lane) _VLV_TX((ch), (lane), 11)
+
+#define VLV_TX_DW14_GRP(ch) _VLV_TX_GRP((ch), 14)
+#define VLV_TX_DW14(ch, lane) _VLV_TX((ch), (lane), 14)
+
+/* CHV dpPhy registers */
+#define CHV_PLL_DW0(ch) _CHV_PLL((ch), 0)
+#define DPIO_CHV_M2_DIV_MASK REG_GENMASK(7, 0)
+#define DPIO_CHV_M2_DIV(m2) REG_FIELD_PREP(DPIO_CHV_M2_DIV_MASK, (m2))
+
+#define CHV_PLL_DW1(ch) _CHV_PLL((ch), 1)
+#define DPIO_CHV_N_DIV_MASK REG_GENMASK(11, 8)
+#define DPIO_CHV_N_DIV(n) REG_FIELD_PREP(DPIO_CHV_N_DIV_MASK, (n))
+#define DPIO_CHV_M1_DIV_MASK REG_GENMASK(2, 0)
+#define DPIO_CHV_M1_DIV(m1) REG_FIELD_PREP(DPIO_CHV_M1_DIV_MASK, (m1))
+#define DPIO_CHV_M1_DIV_BY_2 0
+
+#define CHV_PLL_DW2(ch) _CHV_PLL((ch), 2)
+#define DPIO_CHV_M2_FRAC_DIV_MASK REG_GENMASK(21, 0)
+#define DPIO_CHV_M2_FRAC_DIV(m2_frac) REG_FIELD_PREP(DPIO_CHV_M2_FRAC_DIV_MASK, (m2_frac))
+
+#define CHV_PLL_DW3(ch) _CHV_PLL((ch), 3)
+#define DPIO_CHV_FRAC_DIV_EN REG_BIT(16)
+#define DPIO_CHV_SECOND_MOD REG_BIT(8)
+#define DPIO_CHV_FEEDFWD_GAIN_MASK REG_GENMASK(3, 0)
+#define DPIO_CHV_FEEDFWD_GAIN(x) REG_FIELD_PREP(DPIO_CHV_FEEDFWD_GAIN_MASK, (x))
+
+#define CHV_PLL_DW6(ch) _CHV_PLL((ch), 6)
+#define DPIO_CHV_GAIN_CTRL_MASK REG_GENMASK(18, 16)
+#define DPIO_CHV_GAIN_CTRL(x) REG_FIELD_PREP(DPIO_CHV_GAIN_CTRL_MASK, (x))
+#define DPIO_CHV_INT_COEFF_MASK REG_GENMASK(12, 8)
+#define DPIO_CHV_INT_COEFF(x) REG_FIELD_PREP(DPIO_CHV_INT_COEFF_MASK, (x))
+#define DPIO_CHV_PROP_COEFF_MASK REG_GENMASK(3, 0)
+#define DPIO_CHV_PROP_COEFF(x) REG_FIELD_PREP(DPIO_CHV_PROP_COEFF_MASK, (x))
+
+#define CHV_PLL_DW8(ch) _CHV_PLL((ch), 8)
+#define DPIO_CHV_TDC_TARGET_CNT_MASK REG_GENMASK(9, 0)
+#define DPIO_CHV_TDC_TARGET_CNT(x) REG_FIELD_PREP(DPIO_CHV_TDC_TARGET_CNT_MASK, (x))
+
+#define CHV_PLL_DW9(ch) _CHV_PLL((ch), 9)
+#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
+#define DPIO_CHV_INT_LOCK_THRESHOLD(x) REG_FIELD_PREP(DPIO_CHV_INT_LOCK_THRESHOLD_MASK, (x))
+#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE REG_BIT(0) /* 1: coarse & 0 : fine */
+
+#define CHV_CMN_DW0_CH0 _CHV_CMN(0, 0)
+#define DPIO_ALLDL_POWERDOWN_CH0 REG_BIT(19)
+#define DPIO_ANYDL_POWERDOWN_CH0 REG_BIT(18)
+#define DPIO_ALLDL_POWERDOWN BIT(1)
+#define DPIO_ANYDL_POWERDOWN BIT(0)
+
+#define CHV_CMN_DW5_CH0 _CHV_CMN(0, 5)
+#define CHV_BUFRIGHTENA1_MASK REG_GENMASK(21, 20)
+#define CHV_BUFRIGHTENA1_DISABLE REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 0)
+#define CHV_BUFRIGHTENA1_NORMAL REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 1)
+#define CHV_BUFRIGHTENA1_FORCE REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 3)
+#define CHV_BUFLEFTENA1_MASK REG_GENMASK(23, 22)
+#define CHV_BUFLEFTENA1_DISABLE REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 0)
+#define CHV_BUFLEFTENA1_NORMAL REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 1)
+#define CHV_BUFLEFTENA1_FORCE REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 3)
+
+#define CHV_CMN_DW13_CH0 _CHV_CMN(0, 13)
+#define CHV_CMN_DW0_CH1 _CHV_CMN(1, 0)
+#define DPIO_CHV_S1_DIV_MASK REG_GENMASK(23, 21)
+#define DPIO_CHV_S1_DIV(s1) REG_FIELD_PREP(DPIO_CHV_S1_DIV_MASK, (s1))
+#define DPIO_CHV_P1_DIV_MASK REG_GENMASK(15, 13)
+#define DPIO_CHV_P1_DIV(p1) REG_FIELD_PREP(DPIO_CHV_P1_DIV_MASK, (p1))
+#define DPIO_CHV_P2_DIV_MASK REG_GENMASK(12, 8)
+#define DPIO_CHV_P2_DIV(p2) REG_FIELD_PREP(DPIO_CHV_P2_DIV_MASK, (p2))
+#define DPIO_CHV_K_DIV_MASK REG_GENMASK(7, 4)
+#define DPIO_CHV_K_DIV(k) REG_FIELD_PREP(DPIO_CHV_K_DIV_MASK, (k))
+#define DPIO_PLL_FREQLOCK REG_BIT(1)
+#define DPIO_PLL_LOCK REG_BIT(0)
+#define CHV_CMN_DW13(ch) _PIPE(ch, CHV_CMN_DW13_CH0, CHV_CMN_DW0_CH1)
+
+#define CHV_CMN_DW14_CH0 _CHV_CMN(0, 14)
+#define CHV_CMN_DW1_CH1 _CHV_CMN(1, 1)
+#define DPIO_AFC_RECAL REG_BIT(14)
+#define DPIO_DCLKP_EN REG_BIT(13)
+#define CHV_BUFLEFTENA2_MASK REG_GENMASK(18, 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_DISABLE REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 0)
+#define CHV_BUFLEFTENA2_NORMAL REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 1)
+#define CHV_BUFLEFTENA2_FORCE REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 3)
+#define CHV_BUFRIGHTENA2_MASK REG_GENMASK(20, 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_DISABLE REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 0)
+#define CHV_BUFRIGHTENA2_NORMAL REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 1)
+#define CHV_BUFRIGHTENA2_FORCE REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 3)
+#define CHV_CMN_DW14(ch) _PIPE(ch, CHV_CMN_DW14_CH0, CHV_CMN_DW1_CH1)
+
+#define CHV_CMN_DW19_CH0 _CHV_CMN(0, 19)
+#define CHV_CMN_DW6_CH1 _CHV_CMN(1, 6)
+#define DPIO_ALLDL_POWERDOWN_CH1 REG_BIT(30) /* CL2 DW6 only */
+#define DPIO_ANYDL_POWERDOWN_CH1 REG_BIT(29) /* CL2 DW6 only */
+#define DPIO_DYNPWRDOWNEN_CH1 REG_BIT(28) /* CL2 DW6 only */
+#define CHV_CMN_USEDCLKCHANNEL REG_BIT(13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, CHV_CMN_DW19_CH0, CHV_CMN_DW6_CH1)
+
+#define CHV_CMN_DW28 _CHV_CMN(0, 28)
+#define DPIO_CL1POWERDOWNEN REG_BIT(23)
+#define DPIO_DYNPWRDOWNEN_CH0 REG_BIT(22)
+#define DPIO_SUS_CLK_CONFIG_MASK REG_GENMASK(1, 0)
+#define DPIO_SUS_CLK_CONFIG_ON REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 0)
+#define DPIO_SUS_CLK_CONFIG_CLKREQ REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 1)
+#define DPIO_SUS_CLK_CONFIG_GATE REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 2)
+#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 3)
+
+#define CHV_CMN_DW30 _CHV_CMN(0, 30)
+#define DPIO_CL2_LDOFUSE_PWRENB REG_BIT(6)
+#define DPIO_LRC_BYPASS REG_BIT(3)
+
+#define CHV_TX_DW0(ch, lane) _VLV_TX((ch), (lane), 0)
+#define CHV_TX_DW1(ch, lane) _VLV_TX((ch), (lane), 1)
+#define CHV_TX_DW2(ch, lane) _VLV_TX((ch), (lane), 2)
+#define CHV_TX_DW3(ch, lane) _VLV_TX((ch), (lane), 3)
+#define CHV_TX_DW4(ch, lane) _VLV_TX((ch), (lane), 4)
+#define CHV_TX_DW5(ch, lane) _VLV_TX((ch), (lane), 5)
+#define CHV_TX_DW6(ch, lane) _VLV_TX((ch), (lane), 6)
+#define CHV_TX_DW7(ch, lane) _VLV_TX((ch), (lane), 7)
+#define CHV_TX_DW8(ch, lane) _VLV_TX((ch), (lane), 8)
+#define CHV_TX_DW9(ch, lane) _VLV_TX((ch), (lane), 9)
+#define CHV_TX_DW10(ch, lane) _VLV_TX((ch), (lane), 10)
+
+#define CHV_TX_DW11(ch, lane) _VLV_TX((ch), (lane), 11)
+#define DPIO_FRC_LATENCY_MASK REG_GENMASK(10, 8)
+#define DPIO_FRC_LATENCY(x) REG_FIELD_PREP(DPIO_FRC_LATENCY_MASK, (x))
+
+#define CHV_TX_DW14(ch, lane) _VLV_TX((ch), (lane), 14)
+#define DPIO_UPAR REG_BIT(30)
+
+#endif /* __VLV_DPIO_PHY_REGS_H__ */
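
[Editor's note: every field in this new header follows the REG_GENMASK()/REG_FIELD_PREP()/REG_FIELD_GET() idiom rather than open-coded shifts. A self-contained userspace round trip with simplified stand-in macros (the kernel versions add compile-time type and range checks), using the CHV_PLL_DW1 layout above:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel helpers. */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

/* CHV_PLL_DW1 layout from the header above */
#define DPIO_CHV_N_DIV_MASK	GENMASK(11, 8)
#define DPIO_CHV_M1_DIV_MASK	GENMASK(2, 0)

int main(void)
{
	uint32_t dw1 = FIELD_PREP(DPIO_CHV_N_DIV_MASK, 2) |
		       FIELD_PREP(DPIO_CHV_M1_DIV_MASK, 0 /* M1 /2 */);

	printf("N=%u M1=%u raw=0x%03x\n",
	       (unsigned)FIELD_GET(DPIO_CHV_N_DIV_MASK, dw1),
	       (unsigned)FIELD_GET(DPIO_CHV_M1_DIV_MASK, dw1), dw1);
	return 0;
}

]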
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 9b33b8a74d64..ee9923c7b115 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -85,20 +85,18 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ if (intel_de_wait_for_set(display, MIPI_GEN_FIFO_STAT(display, port),
mask, 100))
- drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n");
+ drm_err(display->drm, "DPI FIFOs are not empty\n");
}
-static void write_data(struct drm_i915_private *dev_priv,
+static void write_data(struct intel_display *display,
i915_reg_t reg,
const u8 *data, u32 len)
{
@@ -110,18 +108,18 @@ static void write_data(struct drm_i915_private *dev_priv,
for (j = 0; j < min_t(u32, len - i, 4); j++)
val |= *data++ << 8 * j;
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
}
-static void read_data(struct drm_i915_private *dev_priv,
+static void read_data(struct intel_display *display,
i915_reg_t reg,
u8 *data, u32 len)
{
u32 i, j;
for (i = 0; i < len; i += 4) {
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
for (j = 0; j < min_t(u32, len - i, 4); j++)
*data++ = val >> 8 * j;
@@ -132,8 +130,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
- struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_dsi *intel_dsi = intel_dsi_host->intel_dsi;
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
enum port port = intel_dsi_host->port;
struct mipi_dsi_packet packet;
ssize_t ret;
@@ -148,51 +146,51 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
header = packet.header;
if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
- data_reg = MIPI_LP_GEN_DATA(port);
+ data_reg = MIPI_LP_GEN_DATA(display, port);
data_mask = LP_DATA_FIFO_FULL;
- ctrl_reg = MIPI_LP_GEN_CTRL(port);
+ ctrl_reg = MIPI_LP_GEN_CTRL(display, port);
ctrl_mask = LP_CTRL_FIFO_FULL;
} else {
- data_reg = MIPI_HS_GEN_DATA(port);
+ data_reg = MIPI_HS_GEN_DATA(display, port);
data_mask = HS_DATA_FIFO_FULL;
- ctrl_reg = MIPI_HS_GEN_CTRL(port);
+ ctrl_reg = MIPI_HS_GEN_CTRL(display, port);
ctrl_mask = HS_CTRL_FIFO_FULL;
}
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
data_mask, 50))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for HS/LP DATA FIFO !full\n");
- write_data(dev_priv, data_reg, packet.payload,
+ write_data(display, data_reg, packet.payload,
packet.payload_length);
}
if (msg->rx_len) {
- intel_de_write(dev_priv, MIPI_INTR_STAT(port),
+ intel_de_write(display, MIPI_INTR_STAT(display, port),
GEN_READ_DATA_AVAIL);
}
- if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
ctrl_mask, 50)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for HS/LP CTRL FIFO !full\n");
}
- intel_de_write(dev_priv, ctrl_reg,
+ intel_de_write(display, ctrl_reg,
header[2] << 16 | header[1] << 8 | header[0]);
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
+ if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port),
data_mask, 50))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for read data.\n");
- read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
+ read_data(display, data_reg, msg->rx_buf, msg->rx_len);
}
/* XXX: fix for reads and writes */
@@ -225,9 +223,7 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
enum port port)
{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 mask;
/* XXX: pipe, hs */
@@ -237,18 +233,18 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
cmd |= DPI_LP_MODE;
/* clear bit */
- intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+ intel_de_write(display, MIPI_INTR_STAT(display, port), SPL_PKT_SENT_INTERRUPT);
/* XXX: old code skips write if control unchanged */
- if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port)))
- drm_dbg_kms(&dev_priv->drm,
+ if (cmd == intel_de_read(display, MIPI_DPI_CONTROL(display, port)))
+ drm_dbg_kms(display->drm,
"Same special packet %02x twice in a row.\n", cmd);
- intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd);
+ intel_de_write(display, MIPI_DPI_CONTROL(display, port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port), mask, 100))
+ drm_err(display->drm,
"Video mode command 0x%08x send failed.\n", cmd);
return 0;
@@ -273,8 +269,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
- base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
@@ -329,7 +324,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
bool cold_boot = false;
@@ -339,29 +334,30 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE);
+ intel_de_rmw(display, MIPI_CTRL(display, port), 0, GLK_MIPIIO_ENABLE);
/* Put the IO into reset */
- intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ u32 tmp = intel_de_read(display, MIPI_DEVICE_READY(display, port));
+
+ intel_de_rmw(display, MIPI_CTRL(display, port),
GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0);
}
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
GLK_MIPIIO_PORT_POWERED, 20))
- drm_err(&dev_priv->drm, "MIPIO port is powergated\n");
+ drm_err(display->drm, "MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
cold_boot |=
- !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY);
+ !(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY);
}
return cold_boot;
@@ -369,99 +365,100 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
static void glk_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
GLK_PHY_STATUS_PORT_READY, 20))
- drm_err(&dev_priv->drm, "PHY is not ON\n");
+ drm_err(display->drm, "PHY is not ON\n");
}
/* Get IO out of reset */
- intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
+ intel_de_rmw(display, MIPI_CTRL(display, PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY)) {
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, DEVICE_READY);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for ULPS active */
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_ULPS_NOT_ACTIVE, 20))
- drm_err(&dev_priv->drm, "ULPS not active\n");
+ drm_err(display->drm, "ULPS not active\n");
/* Exit ULPS */
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY);
/* Enter Normal Mode */
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK,
ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, port), GLK_LP_WAKE, 0);
}
}
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
GLK_DATA_LANE_STOP_STATE, 20))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ if (intel_de_wait_for_set(display, BXT_MIPI_PORT_CTRL(port),
AFE_LATCHOUT, 20))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"D-PHY not entering LP-11 state\n");
}
}
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
+ intel_de_rmw(display, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
/* Clear ULPS and set device ready */
for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ val = intel_de_read(display, MIPI_DEVICE_READY(display, port));
val &= ~ULPS_STATE_MASK;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), val);
usleep_range(2000, 2500);
val |= DEVICE_READY;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), val);
}
}
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_flisdsi_get(dev_priv);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
@@ -474,7 +471,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_ENTER);
usleep_range(2500, 3000);
@@ -482,14 +479,14 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
+ intel_de_rmw(display, VLV_MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_EXIT);
usleep_range(2500, 3000);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY);
usleep_range(2500, 3000);
}
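
[Editor's note: the pattern repeated through the rest of this file — helpers take a struct intel_display instead of drm_i915_private, and the MIPI register macros gain a display argument so the offset can be derived from the display context (e.g. a platform-specific MMIO base) rather than an implicit global. A toy model of the shape; all names and offsets here are made up:

#include <stdio.h>

struct intel_display_model { unsigned int mipi_mmio_base; };

/* Register macro takes the context, so the offset depends on it. */
#define MIPI_CTRL(display, port) \
	((display)->mipi_mmio_base + 0x84 + (port) * 0x800)

static void de_write(const struct intel_display_model *display,
		     unsigned int reg, unsigned int val)
{
	(void)display;
	printf("reg 0x%05x <= 0x%08x\n", reg, val);
}

int main(void)
{
	struct intel_display_model d = { .mipi_mmio_base = 0x60000 };

	de_write(&d, MIPI_CTRL(&d, 0), 0x1);
	return 0;
}

]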
@@ -509,50 +506,50 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Enter ULPS */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_PHY_STATUS_PORT_READY, 20))
- drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
+ drm_err(display->drm, "PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_MIPIIO_PORT_POWERED, 20))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"MIPI IO Port is not powergated\n");
}
}
static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Put the IO into reset */
- intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_PHY_STATUS_PORT_READY, 20))
- drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
+ drm_err(display->drm, "PHY is not turning OFF\n");
}
/* Clear MIPI mode */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, port), GLK_MIPIIO_ENABLE, 0);
}
static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -564,30 +561,31 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
{
return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
+ BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
@@ -596,15 +594,15 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
- intel_de_wait_for_clear(dev_priv, port_ctrl,
+ intel_de_wait_for_clear(display, port_ctrl,
AFE_LATCHOUT, 30))
- drm_err(&dev_priv->drm, "DSI LP not going Low\n");
+ drm_err(display->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0);
+ intel_de_rmw(display, port_ctrl, LP_OUTPUT_HOLD, 0);
usleep_range(1000, 1500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x00);
usleep_range(2000, 2500);
}
}
@@ -612,6 +610,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -622,11 +621,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIXEL_OVERLAP_CNT_MASK,
temp << BXT_PIXEL_OVERLAP_CNT_SHIFT);
} else {
- intel_de_rmw(dev_priv, VLV_CHICKEN_3,
+ intel_de_rmw(display, VLV_CHICKEN_3,
PIXEL_OVERLAP_CNT_MASK,
temp << PIXEL_OVERLAP_CNT_SHIFT);
}
@@ -636,7 +635,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
u32 temp;
- temp = intel_de_read(dev_priv, port_ctrl);
+ temp = intel_de_read(display, port_ctrl);
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
@@ -656,15 +655,15 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
temp |= DITHERING_ENABLE;
/* assert ip_tg_enable signal */
- intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE);
- intel_de_posting_read(dev_priv, port_ctrl);
+ intel_de_write(display, port_ctrl, temp | DPI_ENABLE);
+ intel_de_posting_read(display, port_ctrl);
}
}
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -672,11 +671,12 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
/* de-assert ip_tg_enable signal */
- intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
- intel_de_posting_read(dev_priv, port_ctrl);
+ intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0);
+ intel_de_posting_read(display, port_ctrl);
}
}
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+
+static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
@@ -726,6 +726,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -733,7 +734,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
enum port port;
bool glk_cold_boot = false;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi_wait_panel_power_cycle(intel_dsi);
@@ -753,16 +754,16 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
+ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
/* Power up DSI regulator */
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
+ intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
@@ -798,8 +799,8 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
*/
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_write(dev_priv,
- MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ intel_de_write(display,
+ MIPI_MAX_RETURN_PKT_SIZE(display, port), 8 * 4);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
} else {
@@ -871,11 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
intel_crtc_vblank_off(old_crtc_state);
@@ -906,12 +908,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Power down DSI regulator to save power */
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL,
+ intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL,
HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
+ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
@@ -919,7 +921,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
@@ -935,13 +937,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
bool active = false;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
@@ -960,7 +963,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
- bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE;
+ bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
@@ -969,27 +972,27 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
+ enabled = intel_de_read(display, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
- u32 tmp = intel_de_read(dev_priv,
- MIPI_DSI_FUNC_PRG(port));
+ u32 tmp = intel_de_read(display,
+ MIPI_DSI_FUNC_PRG(display, port));
enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
}
if (!enabled)
continue;
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY))
continue;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ u32 tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
- if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C))
+ if (drm_WARN_ON(display->drm, tmp > PIPE_C))
continue;
*pipe = tmp;
@@ -1010,8 +1013,7 @@ out_put_power:
static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
@@ -1033,11 +1035,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
* encoder->get_hw_state() returns true.
*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+ if (intel_de_read(display, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
break;
}
- fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+ fmt = intel_de_read(display, MIPI_DSI_FUNC_PRG(display, port)) & VID_MODE_FORMAT_MASK;
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
@@ -1049,24 +1051,24 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
- intel_de_read(dev_priv,
+ intel_de_read(display,
BXT_MIPI_TRANS_HACTIVE(port));
adjusted_mode->crtc_vdisplay =
- intel_de_read(dev_priv,
+ intel_de_read(display,
BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
- intel_de_read(dev_priv,
+ intel_de_read(display,
BXT_MIPI_TRANS_VTOTAL(port));
hactive = adjusted_mode->crtc_hdisplay;
- hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port));
+ hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port));
/*
* Meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes
*/
- hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port));
- hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port));
+ hsync = intel_de_read(display, MIPI_HSYNC_PADDING_COUNT(display, port));
+ hbp = intel_de_read(display, MIPI_HBP_COUNT(display, port));
/* horizontal values are in terms of high speed byte clock */
hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
@@ -1083,8 +1085,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
}
/* vertical values are in terms of lines */
- vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port));
- vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port));
+ vfp = intel_de_read(display, MIPI_VFP_COUNT(display, port));
+ vsync = intel_de_read(display, MIPI_VSYNC_PADDING_COUNT(display, port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
@@ -1210,12 +1212,12 @@ static u16 txclkesc(u32 divider, unsigned int us)
}
}
-static void set_dsi_timings(struct drm_encoder *encoder,
+static void set_dsi_timings(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
@@ -1256,29 +1258,29 @@ static void set_dsi_timings(struct drm_encoder *encoder,
* vactive, as they are calculated per channel basis,
* whereas these values should be based on resolution.
*/
- intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port),
+ intel_de_write(display, BXT_MIPI_TRANS_HACTIVE(port),
adjusted_mode->crtc_hdisplay);
- intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port),
+ intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port),
adjusted_mode->crtc_vdisplay);
- intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port),
+ intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port),
adjusted_mode->crtc_vtotal);
}
- intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port),
+ intel_de_write(display, MIPI_HACTIVE_AREA_COUNT(display, port),
hactive);
- intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp);
+ intel_de_write(display, MIPI_HFP_COUNT(display, port), hfp);
/* meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes */
- intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port),
+ intel_de_write(display, MIPI_HSYNC_PADDING_COUNT(display, port),
hsync);
- intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp);
+ intel_de_write(display, MIPI_HBP_COUNT(display, port), hbp);
/* vertical values are in terms of lines */
- intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp);
- intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port),
+ intel_de_write(display, MIPI_VFP_COUNT(display, port), vfp);
+ intel_de_write(display, MIPI_VSYNC_PADDING_COUNT(display, port),
vsync);
- intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp);
+ intel_de_write(display, MIPI_VBP_COUNT(display, port), vbp);
}
}
@@ -1299,21 +1301,20 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
}
}
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;
- drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(crtc->pipe));
+ drm_dbg_kms(display->drm, "pipe %c\n", pipe_name(crtc->pipe));
mode_hdisplay = adjusted_mode->crtc_hdisplay;
@@ -1329,31 +1330,31 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
*/
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(display, MIPI_CTRL(display, PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ intel_de_write(display, MIPI_CTRL(display, PORT_A),
tmp | ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
- intel_de_write(dev_priv, MIPI_CTRL(port),
+ intel_de_write(display, MIPI_CTRL(display, port),
tmp | READ_REQUEST_PRIORITY_HIGH);
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
enum pipe pipe = crtc->pipe;
- intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe));
}
/* XXX: why here, why like this? handling in irq handler?! */
- intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff);
- intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff);
+ intel_de_write(display, MIPI_INTR_STAT(display, port), 0xffffffff);
+ intel_de_write(display, MIPI_INTR_EN(display, port), 0xffffffff);
- intel_de_write(dev_priv, MIPI_DPHY_PARAM(port),
+ intel_de_write(display, MIPI_DPHY_PARAM(display, port),
intel_dsi->dphy_reg);
- intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port),
+ intel_de_write(display, MIPI_DPI_RESOLUTION(display, port),
adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
}
@@ -1381,7 +1382,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_write(display, MIPI_DSI_FUNC_PRG(display, port), val);
/* timeouts for recovery. one frame IIUC. if counter expires,
* EOT and stop state. */
@@ -1402,23 +1403,23 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode == BURST_MODE) {
- intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ intel_de_write(display, MIPI_HS_TX_TIMEOUT(display, port),
txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
} else {
- intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ intel_de_write(display, MIPI_HS_TX_TIMEOUT(display, port),
txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
}
- intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port),
+ intel_de_write(display, MIPI_LP_RX_TIMEOUT(display, port),
intel_dsi->lp_rx_timeout);
- intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_de_write(display, MIPI_TURN_AROUND_TIMEOUT(display, port),
intel_dsi->turn_arnd_val);
- intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port),
+ intel_de_write(display, MIPI_DEVICE_RESET_TIMER(display, port),
intel_dsi->rst_timer_val);
/* dphy stuff */
/* in terms of low power clock */
- intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_de_write(display, MIPI_INIT_COUNT(display, port),
txclkesc(intel_dsi->escape_clk_div, 100));
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
@@ -1429,16 +1430,16 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* getting used. So write the other port
* if not in dual link mode.
*/
- intel_de_write(dev_priv,
- MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A),
+ intel_de_write(display,
+ MIPI_INIT_COUNT(display, port == PORT_A ? PORT_C : PORT_A),
intel_dsi->init_count);
}
/* recovery disables */
- intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp);
+ intel_de_write(display, MIPI_EOT_DISABLE(display, port), tmp);
/* in terms of low power clock */
- intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_de_write(display, MIPI_INIT_COUNT(display, port),
intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
@@ -1446,7 +1447,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
- intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_de_write(display, MIPI_HIGH_LOW_SWITCH_COUNT(display, port),
intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock.
@@ -1455,14 +1456,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
* ) / 105.???
*/
- intel_de_write(dev_priv, MIPI_LP_BYTECLK(port),
+ intel_de_write(display, MIPI_LP_BYTECLK(display, port),
intel_dsi->lp_byte_clk);
if (IS_GEMINILAKE(dev_priv)) {
- intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port),
+ intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port),
intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
- intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port),
+ intel_de_write(display, MIPI_CLK_LANE_TIMING(display, port),
intel_dsi->dphy_reg);
}
@@ -1471,10 +1472,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* this register in terms of byte clocks. based on dsi transfer
* rate and the number of lanes configured the time taken to
* transmit 16 long packets in a dsi stream varies. */
- intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port),
+ intel_de_write(display, MIPI_DBI_BW_CTRL(display, port),
intel_dsi->bw_timer);
- intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_de_write(display, MIPI_CLK_LANE_SWITCH_TIME_CNT(display, port),
intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi)) {
@@ -1502,13 +1503,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
break;
}
- intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), fmt);
+ intel_de_write(display, MIPI_VIDEO_MODE_FORMAT(display, port), fmt);
}
}
}
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -1518,17 +1520,17 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
- intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(display, MIPI_EOT_DISABLE(display, port), CLOCKSTOP);
- intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0);
+ intel_de_rmw(display, MIPI_DSI_FUNC_PRG(display, port), VID_MODE_FORMAT_MASK, 0);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x1);
}
}
@@ -1592,8 +1594,7 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
@@ -1879,10 +1880,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
struct intel_dsi *intel_dsi;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
struct drm_display_mode *current_mode;
const struct dmi_system_id *dmi_id;
enum port port;
@@ -1903,64 +1902,61 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
if (!intel_dsi)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(intel_dsi);
return;
}
- intel_encoder = &intel_dsi->base;
- encoder = &intel_encoder->base;
- intel_dsi->attached_connector = intel_connector;
-
- connector = &intel_connector->base;
+ encoder = &intel_dsi->base;
+ intel_dsi->attached_connector = connector;
- drm_encoder_init(&dev_priv->drm, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
- "DSI %c", port_name(port));
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs,
+ DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
- intel_encoder->compute_config = intel_dsi_compute_config;
- intel_encoder->pre_enable = intel_dsi_pre_enable;
+ encoder->compute_config = intel_dsi_compute_config;
+ encoder->pre_enable = intel_dsi_pre_enable;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- intel_encoder->enable = bxt_dsi_enable;
- intel_encoder->disable = intel_dsi_disable;
- intel_encoder->post_disable = intel_dsi_post_disable;
- intel_encoder->get_hw_state = intel_dsi_get_hw_state;
- intel_encoder->get_config = intel_dsi_get_config;
- intel_encoder->update_pipe = intel_backlight_update;
- intel_encoder->shutdown = intel_dsi_shutdown;
+ encoder->enable = bxt_dsi_enable;
+ encoder->disable = intel_dsi_disable;
+ encoder->post_disable = intel_dsi_post_disable;
+ encoder->get_hw_state = intel_dsi_get_hw_state;
+ encoder->get_config = intel_dsi_get_config;
+ encoder->update_pipe = intel_backlight_update;
+ encoder->shutdown = intel_dsi_shutdown;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ connector->get_hw_state = intel_connector_get_hw_state;
- intel_encoder->port = port;
- intel_encoder->type = INTEL_OUTPUT_DSI;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
- intel_encoder->cloneable = 0;
+ encoder->port = port;
+ encoder->type = INTEL_OUTPUT_DSI;
+ encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ encoder->cloneable = 0;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- intel_encoder->pipe_mask = ~0;
+ encoder->pipe_mask = ~0;
else if (port == PORT_A)
- intel_encoder->pipe_mask = BIT(PIPE_A);
+ encoder->pipe_mask = BIT(PIPE_A);
else
- intel_encoder->pipe_mask = BIT(PIPE_B);
+ encoder->pipe_mask = BIT(PIPE_B);
intel_dsi->panel_power_off_time = ktime_get_boottime();
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(dev_priv, &connector->panel, NULL, NULL);
- if (intel_connector->panel.vbt.dsi.config->dual_link)
+ if (connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
else
intel_dsi->ports = BIT(port);
- if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
- intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+ if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
- if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
- intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+ if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1980,7 +1976,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
/* Use clock read-back from current hw-state for fastboot */
- current_mode = intel_encoder_current_mode(intel_encoder);
+ current_mode = intel_encoder_current_mode(encoder);
if (current_mode) {
drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
intel_dsi->pclk, current_mode->clock);
@@ -1996,22 +1992,22 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
vlv_dphy_param_init(intel_dsi);
intel_dsi_vbt_gpio_init(intel_dsi,
- intel_dsi_get_hw_state(intel_encoder, &pipe));
+ intel_dsi_get_hw_state(encoder, &pipe));
- drm_connector_init(&dev_priv->drm, connector, &intel_dsi_connector_funcs,
+ drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
- drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs);
- connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+ connector->base.display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
- intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_connector_attach_encoder(connector, encoder);
mutex_lock(&dev_priv->drm.mode_config.mutex);
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
mutex_unlock(&dev_priv->drm.mode_config.mutex);
- if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+ if (!intel_panel_preferred_fixed_mode(connector)) {
drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
@@ -2024,18 +2020,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
quirk_func(intel_dsi);
}
- intel_panel_init(intel_connector, NULL);
+ intel_panel_init(connector, NULL);
- intel_backlight_setup(intel_connector, INVALID_PIPE);
+ intel_backlight_setup(connector, INVALID_PIPE);
- vlv_dsi_add_properties(intel_connector);
+ vlv_dsi_add_properties(connector);
return;
err_cleanup_connector:
- drm_connector_cleanup(&intel_connector->base);
+ drm_connector_cleanup(&connector->base);
err:
- drm_encoder_cleanup(&intel_encoder->base);
+ drm_encoder_cleanup(&encoder->base);
kfree(intel_dsi);
- kfree(intel_connector);
+ kfree(connector);
}
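The recurring change in vlv_dsi.c above is mechanical: derive a struct intel_display * once per function and hand it both to the intel_de_* MMIO helpers and to the MIPI_* register macros, which now take the display pointer so they can resolve the runtime DSI mmio base themselves. A minimal sketch of the pattern, using names from the patch (a shape illustration only, not a compilable unit outside the driver):

static void example_dsi_write(struct intel_encoder *encoder, enum port port)
{
	struct intel_display *display = to_intel_display(encoder);

	/* MMIO helpers and register macros both take the display pointer;
	 * MIPI_DEVICE_READY() adds display->dsi.mmio_base internally. */
	intel_de_write(display, MIPI_DEVICE_READY(display, port), DEVICE_READY);
	intel_de_posting_read(display, MIPI_DEVICE_READY(display, port));
}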
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index ae0a0b11bae3..70c5a13a3c75 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -365,13 +365,13 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
- u32 temp;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 temp;
- temp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ temp = intel_de_read(display, MIPI_CTRL(display, port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- intel_de_write(dev_priv, MIPI_CTRL(port),
+ intel_de_write(display, MIPI_CTRL(display, port),
temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
@@ -570,24 +570,24 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
/* Clear old configurations */
if (IS_BROXTON(dev_priv)) {
- tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
- intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
+ intel_de_rmw(display, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
- intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
+ intel_de_rmw(display, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
}
- intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(display, MIPI_EOT_DISABLE(display, port), CLOCKSTOP);
}
static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
index abbe427e462e..c1126d170ec6 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
@@ -11,26 +11,23 @@
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
#define BXT_MIPI_BASE 0x60000
-#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base)
+#define _MIPI_MMIO_BASE(display) ((display)->dsi.mmio_base)
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
-#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
+#define _MMIO_MIPI(base, port, a, c) _MMIO((base) + _MIPI_PORT(port, a, c))
/* BXT MIPI mode configure */
-#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
-#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
-#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
+#define _BXT_MIPIA_TRANS_HACTIVE 0xb0f8
+#define _BXT_MIPIC_TRANS_HACTIVE 0xb8f8
+#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
-#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
-#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
-#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
+#define _BXT_MIPIA_TRANS_VACTIVE 0xb0fc
+#define _BXT_MIPIC_TRANS_VACTIVE 0xb8fc
+#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
-#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
-#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
-#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
+#define _BXT_MIPIA_TRANS_VTOTAL 0xb100
+#define _BXT_MIPIC_TRANS_VTOTAL 0xb900
+#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
@@ -38,14 +35,14 @@
#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054)
#define HS_IO_CTRL_SELECT (1 << 0)
-#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
-#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+#define _MIPIA_PORT_CTRL 0x61190
+#define _MIPIC_PORT_CTRL 0x61700
+#define VLV_MIPI_PORT_CTRL(port) _MMIO_MIPI(VLV_MIPI_BASE, port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
/* BXT port control */
-#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
-#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
-#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
+#define _BXT_MIPIA_PORT_CTRL 0xb0c0
+#define _BXT_MIPIC_PORT_CTRL 0xb8c0
+#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
@@ -87,20 +84,17 @@
#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
-#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
-#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
+#define _MIPIA_TEARING_CTRL 0x61194
+#define _MIPIC_TEARING_CTRL 0x61704
+#define VLV_MIPI_TEARING_CTRL(port) _MMIO_MIPI(VLV_MIPI_BASE, port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
-/* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
-
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000)
-#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800)
-#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
+#define _MIPIA_DEVICE_READY 0xb000
+#define _MIPIC_DEVICE_READY 0xb800
+#define MIPI_DEVICE_READY(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -108,12 +102,12 @@
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004)
-#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804)
-#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
-#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008)
-#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808)
-#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
+#define _MIPIA_INTR_STAT 0xb004
+#define _MIPIC_INTR_STAT 0xb804
+#define MIPI_INTR_STAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
+#define _MIPIA_INTR_EN 0xb008
+#define _MIPIC_INTR_EN 0xb808
+#define MIPI_INTR_EN(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -147,9 +141,9 @@
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c)
-#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG 0xb00c
+#define _MIPIC_DSI_FUNC_PRG 0xb80c
+#define MIPI_DSI_FUNC_PRG(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -170,77 +164,77 @@
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010)
-#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT 0xb010
+#define _MIPIC_HS_TX_TIMEOUT 0xb810
+#define MIPI_HS_TX_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014)
-#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT 0xb014
+#define _MIPIC_LP_RX_TIMEOUT 0xb814
+#define MIPI_LP_RX_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018)
-#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT 0xb018
+#define _MIPIC_TURN_AROUND_TIMEOUT 0xb818
+#define MIPI_TURN_AROUND_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c)
-#define _MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER 0xb01c
+#define _MIPIC_DEVICE_RESET_TIMER 0xb81c
+#define MIPI_DEVICE_RESET_TIMER(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020)
-#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820)
-#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION 0xb020
+#define _MIPIC_DPI_RESOLUTION 0xb820
+#define MIPI_DPI_RESOLUTION(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024)
-#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE 0xb024
+#define _MIPIC_DBI_FIFO_THROTTLE 0xb824
+#define MIPI_DBI_FIFO_THROTTLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028)
-#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT 0xb028
+#define _MIPIC_HSYNC_PADDING_COUNT 0xb828
+#define MIPI_HSYNC_PADDING_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
-#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c)
-#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c)
-#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
+#define _MIPIA_HBP_COUNT 0xb02c
+#define _MIPIC_HBP_COUNT 0xb82c
+#define MIPI_HBP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
-#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030)
-#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830)
-#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
+#define _MIPIA_HFP_COUNT 0xb030
+#define _MIPIC_HFP_COUNT 0xb830
+#define MIPI_HFP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
-#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034)
-#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
+#define _MIPIA_HACTIVE_AREA_COUNT 0xb034
+#define _MIPIC_HACTIVE_AREA_COUNT 0xb834
+#define MIPI_HACTIVE_AREA_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
-#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038)
-#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
+#define _MIPIA_VSYNC_PADDING_COUNT 0xb038
+#define _MIPIC_VSYNC_PADDING_COUNT 0xb838
+#define MIPI_VSYNC_PADDING_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
-#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c)
-#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c)
-#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
+#define _MIPIA_VBP_COUNT 0xb03c
+#define _MIPIC_VBP_COUNT 0xb83c
+#define MIPI_VBP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
-#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040)
-#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840)
-#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
+#define _MIPIA_VFP_COUNT 0xb040
+#define _MIPIC_VFP_COUNT 0xb840
+#define MIPI_VFP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044)
-#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT 0xb044
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT 0xb844
+#define MIPI_HIGH_LOW_SWITCH_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048)
-#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848)
-#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL 0xb048
+#define _MIPIC_DPI_CONTROL 0xb848
+#define MIPI_DPI_CONTROL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -249,28 +243,27 @@
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c)
-#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c)
-#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
+#define _MIPIA_DPI_DATA 0xb04c
+#define _MIPIC_DPI_DATA 0xb84c
+#define MIPI_DPI_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050)
-#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850)
-#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
+#define _MIPIA_INIT_COUNT 0xb050
+#define _MIPIC_INIT_COUNT 0xb850
+#define MIPI_INIT_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054)
-#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
- _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE 0xb054
+#define _MIPIC_MAX_RETURN_PKT_SIZE 0xb854
+#define MIPI_MAX_RETURN_PKT_SIZE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058)
-#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT 0xb058
+#define _MIPIC_VIDEO_MODE_FORMAT 0xb858
+#define MIPI_VIDEO_MODE_FORMAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -278,9 +271,9 @@
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c)
-#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c)
-#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE 0xb05c
+#define _MIPIC_EOT_DISABLE 0xb85c
+#define MIPI_EOT_DISABLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
#define BXT_DPHY_DEFEATURE_EN (1 << 8)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
@@ -292,36 +285,36 @@
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060)
-#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860)
-#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK 0xb060
+#define _MIPIC_LP_BYTECLK 0xb860
+#define MIPI_LP_BYTECLK(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
-#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4)
-#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4)
-#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
+#define _MIPIA_TLPX_TIME_COUNT 0xb0a4
+#define _MIPIC_TLPX_TIME_COUNT 0xb8a4
+#define MIPI_TLPX_TIME_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
-#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098)
-#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898)
-#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
+#define _MIPIA_CLK_LANE_TIMING 0xb098
+#define _MIPIC_CLK_LANE_TIMING 0xb898
+#define MIPI_CLK_LANE_TIMING(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064)
-#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864)
-#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA 0xb064
+#define _MIPIC_LP_GEN_DATA 0xb864
+#define MIPI_LP_GEN_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068)
-#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868)
-#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c)
-#define _MIPIC_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb86c)
-#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070)
-#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870)
-#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
+#define _MIPIA_HS_GEN_DATA 0xb068
+#define _MIPIC_HS_GEN_DATA 0xb868
+#define MIPI_HS_GEN_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL 0xb06c
+#define _MIPIC_LP_GEN_CTRL 0xb86c
+#define MIPI_LP_GEN_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL 0xb070
+#define _MIPIC_HS_GEN_CTRL 0xb870
+#define MIPI_HS_GEN_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -332,9 +325,9 @@
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074)
-#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874)
-#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT 0xb074
+#define _MIPIC_GEN_FIFO_STAT 0xb874
+#define MIPI_GEN_FIFO_STAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -350,16 +343,16 @@
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078)
-#define _MIPIC_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE 0xb078
+#define _MIPIC_HS_LS_DBI_ENABLE 0xb878
+#define MIPI_HS_LP_DBI_ENABLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080)
-#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880)
-#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM 0xb080
+#define _MIPIC_DPHY_PARAM 0xb880
+#define MIPI_DPHY_PARAM(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -369,34 +362,34 @@
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)
-#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084)
-#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884)
-#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
+#define _MIPIA_DBI_BW_CTRL 0xb084
+#define _MIPIC_DBI_BW_CTRL 0xb884
+#define MIPI_DBI_BW_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088)
-#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT 0xb088
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT 0xb888
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c)
-#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c)
-#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL 0xb08c
+#define _MIPIC_STOP_STATE_STALL 0xb88c
+#define MIPI_STOP_STATE_STALL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090)
-#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890)
-#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094)
-#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894)
-#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1 0xb090
+#define _MIPIC_INTR_STAT_REG_1 0xb890
+#define MIPI_INTR_STAT_REG_1(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 0xb094
+#define _MIPIC_INTR_EN_REG_1 0xb894
+#define MIPI_INTR_EN_REG_1(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL(display) (_MIPI_MMIO_BASE(display) + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -409,9 +402,9 @@
/* MIPI adapter registers */
-#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104)
-#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904)
-#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
+#define _MIPIA_CTRL 0xb104
+#define _MIPIC_CTRL 0xb904
+#define MIPI_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CTRL, _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -442,41 +435,41 @@
#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
#define GLK_MIPIIO_ENABLE (1 << 0)
-#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108)
-#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908)
-#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS 0xb108
+#define _MIPIC_DATA_ADDRESS 0xb908
+#define MIPI_DATA_ADDRESS(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c)
-#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c)
-#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH 0xb10c
+#define _MIPIC_DATA_LENGTH 0xb90c
+#define MIPI_DATA_LENGTH(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110)
-#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910)
-#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS 0xb110
+#define _MIPIC_COMMAND_ADDRESS 0xb910
+#define MIPI_COMMAND_ADDRESS(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114)
-#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914)
-#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH 0xb114
+#define _MIPIC_COMMAND_LENGTH 0xb914
+#define MIPI_COMMAND_LENGTH(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118)
-#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918)
-#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0 0xb118
+#define _MIPIC_READ_DATA_RETURN0 0xb918
+#define MIPI_READ_DATA_RETURN(display, port, n) _MMIO_MIPI(_MIPI_MMIO_BASE(display) + 4 * (n), port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138)
-#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938)
-#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID 0xb138
+#define _MIPIC_READ_DATA_VALID 0xb938
+#define MIPI_READ_DATA_VALID(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
#endif /* __VLV_DSI_REGS_H__ */
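To make the new register layout concrete, here is a small self-contained sketch (a hypothetical test harness, not kernel code) that mirrors the reworked _MMIO_MIPI(base, port, a, c) arithmetic: per-port offsets are now base-relative, and the VLV or BXT base is supplied explicitly by each wrapper macro:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel macros above; the PORT_A/PORT_C values are
 * illustrative, only their inequality matters here. */
#define PORT_A 0
#define PORT_C 2
#define MIPI_PORT(port, a, c) (((port) == PORT_A) ? (a) : (c))
#define MMIO_MIPI(base, port, a, c) ((uint32_t)((base) + MIPI_PORT(port, a, c)))

int main(void)
{
	uint32_t bxt_mipi_base = 0x60000; /* BXT_MIPI_BASE */

	/* MIPI_CTRL(display, PORT_C) on BXT resolves to 0x60000 + 0xb904,
	 * the same 0x6b904 the old absolute definitions hard-coded. */
	printf("MIPI_CTRL(PORT_C) = 0x%x\n",
	       MMIO_MIPI(bxt_mipi_base, PORT_C, 0xb104, 0xb904));
	return 0;
}

The design point is that the 0xbxxx DSI controller offsets no longer hard-code which base they sit on: the VLV wrappers pass VLV_MIPI_BASE while the BXT/GLK paths pass the runtime display->dsi.mmio_base, so one macro family serves both layouts.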
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dcbfe32fd30c..81f65cab1330 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -879,6 +879,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
struct drm_i915_gem_context_param *args)
{
+ struct drm_i915_private *i915 = fpriv->i915;
int ret = 0;
switch (args->param) {
@@ -904,6 +905,13 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
break;
+ case I915_CONTEXT_PARAM_LOW_LATENCY:
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY);
+ else
+ ret = -EINVAL;
+ break;
+
case I915_CONTEXT_PARAM_RECOVERABLE:
if (args->size)
ret = -EINVAL;
@@ -992,6 +1000,9 @@ static int intel_context_set_gem(struct intel_context *ce,
if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
ret = intel_context_reconfigure_sseu(ce, sseu);
+ if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags))
+ __set_bit(CONTEXT_LOW_LATENCY, &ce->flags);
+
return ret;
}
@@ -1630,6 +1641,9 @@ i915_gem_create_context(struct drm_i915_private *i915,
if (vm)
ctx->vm = vm;
+ /* Assign early so intel_context_set_gem can access these flags */
+ ctx->user_flags = pc->user_flags;
+
mutex_init(&ctx->engines_mutex);
if (pc->num_user_engines >= 0) {
i915_gem_context_set_user_engines(ctx);
@@ -1652,8 +1666,6 @@ i915_gem_create_context(struct drm_i915_private *i915,
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(i915);
- ctx->user_flags = pc->user_flags;
-
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
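The context-parameter hunks above wire I915_CONTEXT_PARAM_LOW_LATENCY into the proto-context path only, so the hint is accepted at context creation time and only when GuC submission is active. A hedged userspace sketch of how a client would request it; it assumes the matching uapi additions from this series in <drm/i915_drm.h> and standard ioctl usage:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int create_low_latency_ctx(int fd, uint32_t *ctx_id)
{
	/* Attach the low-latency param as a create-time extension, since
	 * set_proto_ctx_param() above handles only the creation path. */
	struct drm_i915_gem_context_create_ext_setparam p = {
		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_LOW_LATENCY,
			.value = 1,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uint64_t)(uintptr_t)&p,
	};
	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);

	if (ret == 0)
		*ctx_id = create.ctx_id;
	return ret; /* fails with EINVAL when GuC submission is not in use */
}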
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 03bc7f9d191b..b6d97da63d1f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -338,6 +338,7 @@ struct i915_gem_context {
#define UCONTEXT_BANNABLE 2
#define UCONTEXT_RECOVERABLE 3
#define UCONTEXT_PERSISTENCE 4
+#define UCONTEXT_LOW_LATENCY 5
/**
* @flags: small set of booleans
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d3a771afb083..42619fc05de4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -255,7 +255,6 @@ struct i915_execbuffer {
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
intel_wakeref_t wakeref;
- intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -2457,7 +2456,7 @@ static int eb_submit(struct i915_execbuffer *eb)
* The engine index is returned.
*/
static unsigned int
-gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+gen8_dispatch_bsd_engine(struct drm_i915_private *i915,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2465,7 +2464,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
+ get_random_u32_below(i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
return file_priv->bsd_engine;
}
@@ -2686,7 +2685,6 @@ static int
eb_select_engine(struct i915_execbuffer *eb)
{
struct intel_context *ce, *child;
- struct intel_gt *gt;
unsigned int idx;
int err;
@@ -2710,17 +2708,10 @@ eb_select_engine(struct i915_execbuffer *eb)
}
}
eb->num_batches = ce->parallel.number_children + 1;
- gt = ce->engine->gt;
for_each_child(ce, child)
intel_context_get(child);
eb->wakeref = intel_gt_pm_get(ce->engine->gt);
- /*
- * Keep GT0 active on MTL so that i915_vma_parked() doesn't
- * free VMAs while execbuf ioctl is validating VMAs.
- */
- if (gt->info.id)
- eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2759,9 +2750,6 @@ eb_select_engine(struct i915_execbuffer *eb)
return err;
err:
- if (gt->info.id)
- intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
-
intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
@@ -2775,12 +2763,6 @@ eb_put_engine(struct i915_execbuffer *eb)
struct intel_context *child;
i915_vm_put(eb->context->vm);
- /*
- * This works in conjunction with eb_select_engine() to prevent
- * i915_vma_parked() from interfering while execbuf validates vmas.
- */
- if (eb->gt->info.id)
- intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 0c5cdab278b6..1495b6074492 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -386,7 +386,7 @@ struct drm_i915_gem_object {
* and kernel mode driver for caching policy control after GEN12.
* In the meantime platform specific tables are created to translate
* i915_cache_level into pat index; for more details, check the macros
- * defined i915/i915_pci.c, e.g. PVC_CACHELEVEL.
+ * defined in i915/i915_pci.c, e.g. TGL_CACHELEVEL.
* For backward compatibility, this field contains values that exactly match
* the entries of enum i915_cache_level for pre-GEN12 platforms (See
* LEGACY_CACHELEVEL), so that the PTE encode functions for these
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 38b72d86560f..c5e1c718a6d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -654,7 +654,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
-i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
const void *data, resource_size_t size)
{
struct drm_i915_gem_object *obj;
@@ -663,8 +663,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
resource_size_t offset;
int err;
- GEM_WARN_ON(IS_DGFX(dev_priv));
- obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+ GEM_WARN_ON(IS_DGFX(i915));
+ obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 258381d1c054..dfe0db8bb1b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -14,14 +14,14 @@ struct drm_i915_gem_object;
#define i915_stolen_fb drm_mm_node
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
struct drm_mm_node *node);
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
@@ -31,7 +31,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size);
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index a049ca0b7980..d9eb84c1d2f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -343,12 +343,12 @@ int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_gem_object *obj;
int err;
- if (!to_gt(dev_priv)->ggtt->num_fences)
+ if (!to_gt(i915)->ggtt->num_fences)
return -EOPNOTSUPP;
obj = i915_gem_object_lookup(file, args->handle);
@@ -374,9 +374,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
else
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -427,11 +427,11 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_object *obj;
int err = -ENOENT;
- if (!to_gt(dev_priv)->ggtt->num_fences)
+ if (!to_gt(i915)->ggtt->num_fences)
return -EOPNOTSUPP;
rcu_read_lock();
@@ -447,10 +447,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
@@ -459,7 +459,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 61abfb505766..09b68713ab32 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -463,13 +463,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
struct drm_file *file)
{
static struct lock_class_key __maybe_unused lock_class;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object __maybe_unused *obj;
int __maybe_unused ret;
u32 __maybe_unused handle;
- if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
/* We cannot support coherent userptr objects on hw without
* LLC and with broken snooping.
*/
@@ -501,7 +501,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- if (!to_gt(dev_priv)->vm->has_read_only)
+ if (!to_gt(i915)->vm->has_read_only)
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 3ff3d8889c6c..84d41e6ccf05 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -713,7 +713,7 @@ static int igt_ppgtt_huge_fill(void *arg)
{
struct drm_i915_private *i915 = arg;
unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
- bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long max_pages;
@@ -857,7 +857,7 @@ out:
static int igt_ppgtt_64K(void *arg)
{
struct drm_i915_private *i915 = arg;
- bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
@@ -1969,19 +1969,19 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
};
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
struct i915_ppgtt *ppgtt;
int err;
- dev_priv = mock_gem_device();
- if (!dev_priv)
+ i915 = mock_gem_device();
+ if (!i915)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
- RUNTIME_INFO(dev_priv)->ppgtt_size = 48;
+ RUNTIME_INFO(i915)->ppgtt_type = INTEL_PPGTT_FULL;
+ RUNTIME_INFO(i915)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -2005,7 +2005,7 @@ int i915_gem_huge_page_mock_selftests(void)
out_put:
i915_vm_put(&ppgtt->vm);
out_unlock:
- mock_destroy_device(dev_priv);
+ mock_destroy_device(i915);
return err;
}
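A note on the version checks bumped throughout this series: with XEHPSDV (graphics IP 12.50) removed, DG2's 12.55 becomes the lowest remaining Xe_HP version, so every ">= IP_VER(12, 50)" check becomes ">= IP_VER(12, 55)" without changing behaviour on the platforms that remain. The comparison works as plain integer ordering because IP_VER() packs version and release into one word; a standalone restatement (the real macro lives in the driver's headers):

#include <assert.h>

#define IP_VER(ver, rel) ((ver) << 8 | (rel))

int main(void)
{
	assert(IP_VER(12, 55) > IP_VER(12, 50)); /* same major, later release */
	assert(IP_VER(12, 71) > IP_VER(12, 55));
	assert(IP_VER(20, 0) > IP_VER(12, 74));  /* major version dominates */
	return 0;
}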
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 10a7847f1b04..bac15196b4d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -117,7 +117,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
if (gen < 12)
return true;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return false;
return HAS_DISPLAY(i915);
@@ -166,7 +166,7 @@ static int prepare_blit(const struct tiled_blits *t,
src_pitch = t->width; /* in dwords */
if (src->tiling == CLIENT_TILING_Y) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
- if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
} else if (src->tiling == CLIENT_TILING_X) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
@@ -177,7 +177,7 @@ static int prepare_blit(const struct tiled_blits *t,
dst_pitch = t->width; /* in dwords */
if (dst->tiling == CLIENT_TILING_Y) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
- if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
} else if (dst->tiling == CLIENT_TILING_X) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
@@ -365,7 +365,7 @@ static u64 tiled_offset(const struct intel_gt *gt,
v += x;
swizzle = gt->ggtt->bit_6_swizzle_x;
- } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
/* Y-major tiling layout is Tile4 for Xe_HP and beyond */
v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index d684a70f2c04..65a931ea80e9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_selftest.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "mock_context.h"
#include "mock_dmabuf.h"
@@ -155,6 +156,7 @@ static int verify_access(struct drm_i915_private *i915,
struct file *file;
u32 *vaddr;
int err = 0, i;
+ unsigned int mode;
file = mock_file(i915);
if (IS_ERR(file))
@@ -194,7 +196,8 @@ static int verify_access(struct drm_i915_private *i915,
if (err)
goto out_file;
- vaddr = i915_gem_object_pin_map_unlocked(native_obj, I915_MAP_WB);
+ mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, true);
+ vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto out_file;
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index e1bf13e3d307..e9f65f27b53f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -189,9 +189,6 @@ static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
{
i915_reg_t reg = gen12_get_aux_inv_reg(engine);
- if (IS_PONTEVECCHIO(engine->i915))
- return false;
-
/*
* So far, platforms supported by i915 that have flat CCS do not require
* AUX invalidation. Check also whether the engine requires it.
@@ -743,21 +740,25 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
}
/* Wa_14014475959:dg2 */
-#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540
-static u32 ccs_semaphore_offset(struct i915_request *rq)
+/* Wa_16019325821 */
+/* Wa_14019159160 */
+#define HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET 0x540
+static u32 hold_switchout_semaphore_offset(struct i915_request *rq)
{
return i915_ggtt_offset(rq->context->state) +
- (LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
+ (LRC_PPHWSP_PN * PAGE_SIZE) + HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET;
}
/* Wa_14014475959:dg2 */
-static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
+/* Wa_16019325821 */
+/* Wa_14019159160 */
+static u32 *hold_switchout_emit_wa_busywait(struct i915_request *rq, u32 *cs)
{
int i;
*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
MI_ATOMIC_MOVE;
- *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = hold_switchout_semaphore_offset(rq);
*cs++ = 0;
*cs++ = 1;
@@ -773,7 +774,7 @@ static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
- *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = hold_switchout_semaphore_offset(rq);
*cs++ = 0;
return cs;
@@ -790,8 +791,10 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
cs = gen12_emit_preempt_busywait(rq, cs);
/* Wa_14014475959:dg2 */
- if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
- cs = ccs_emit_wa_busywait(rq, cs);
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if (intel_engine_uses_wa_hold_switchout(rq->engine))
+ cs = hold_switchout_emit_wa_busywait(rq, cs);
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -827,7 +830,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
cs = gen12_emit_pipe_control(cs, 0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
- if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
/* Wa_1409600907 */
flags |= PIPE_CONTROL_DEPTH_STALL;
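The semaphore polled by hold_switchout_emit_wa_busywait() lives in the per-process HWSP page of the context image, 0x540 bytes in, exactly as hold_switchout_semaphore_offset() computes above. A toy recomputation under common assumptions (4 KiB pages, PPHWSP as page 1 of the LRC image) with a made-up GGTT offset:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define LRC_PPHWSP_PN 1u /* assumed PPHWSP page index in the LRC image */
#define HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET 0x540u

int main(void)
{
	unsigned int lrc_ggtt = 0x00100000; /* hypothetical i915_ggtt_offset() */

	printf("semaphore GGTT address: 0x%x\n", /* 0x101540 here */
	       lrc_ggtt + LRC_PPHWSP_PN * PAGE_SIZE +
	       HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET);
	return 0;
}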
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index fa46d2308b0e..398d60a66410 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -500,11 +500,11 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
}
static void
-xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- struct sgt_dma *iter,
- unsigned int pat_index,
- u32 flags)
+xehp_ppgtt_insert_huge(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ struct sgt_dma *iter,
+ unsigned int pat_index,
+ u32 flags)
{
const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
unsigned int rem = sg_dma_len(iter->sg);
@@ -741,8 +741,8 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma_res);
if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
- if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 50))
- xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
+ if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 55))
+ xehp_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
else
gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
} else {
@@ -781,11 +781,11 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}
-static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- unsigned int pat_index,
- u32 flags)
+static void xehp_ppgtt_insert_entry_lm(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ unsigned int pat_index,
+ u32 flags)
{
u64 idx = offset >> GEN8_PTE_SHIFT;
struct i915_page_directory * const pdp =
@@ -810,15 +810,15 @@ static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags);
}
-static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- unsigned int pat_index,
- u32 flags)
+static void xehp_ppgtt_insert_entry(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ unsigned int pat_index,
+ u32 flags)
{
if (flags & PTE_LM)
- return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
- pat_index, flags);
+ return xehp_ppgtt_insert_entry_lm(vm, addr, offset,
+ pat_index, flags);
return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags);
}
@@ -961,6 +961,9 @@ static int gen8_init_rsvd(struct i915_address_space *vm)
struct i915_vma *vma;
int ret;
+ if (!intel_gt_needs_wa_16018031267(vm->gt))
+ return 0;
+
/* The memory will be used only by GPU. */
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
I915_BO_ALLOC_VOLATILE |
@@ -1042,7 +1045,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
if (HAS_64K_PAGES(gt->i915))
- ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry;
+ ppgtt->vm.insert_page = xehp_ppgtt_insert_entry;
else
ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 7eccbd70d89f..ed95a7b57cbb 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -130,6 +130,7 @@ struct intel_context {
#define CONTEXT_PERMA_PIN 11
#define CONTEXT_IS_PARKING 12
#define CONTEXT_EXITING 13
+#define CONTEXT_LOW_LATENCY 14
struct {
u64 timeout_us;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 1ade568ffbfa..5c8e9ee3b008 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -497,9 +497,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
engine->logical_mask = BIT(logical_instance);
__sprint_engine_name(engine);
- if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
- __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
- engine->class == RENDER_CLASS)
+ if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
+ __ffs(CCS_MASK(engine->gt) | RCS_MASK(engine->gt)) == engine->instance)
engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
/* features common between engines sharing EUs */
@@ -589,7 +588,7 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
- if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
@@ -610,7 +609,7 @@ u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
- if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
@@ -679,7 +678,7 @@ void intel_engines_release(struct intel_gt *gt)
*/
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
@@ -765,14 +764,14 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
* and bits have disable semantics.
*/
media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
- if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
+ if (MEDIA_VER_FULL(i915) < IP_VER(12, 55))
media_fuse = ~media_fuse;
vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
- if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (MEDIA_VER_FULL(i915) >= IP_VER(12, 55)) {
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
} else {
@@ -839,38 +838,6 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
}
}
-static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_gt_info *info = &gt->info;
- unsigned long meml3_mask;
- unsigned long quad;
-
- if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
- GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
- return;
-
- meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
- meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
-
- /*
- * Link Copy engines may be fused off according to meml3_mask. Each
- * bit is a quad that houses 2 Link Copy and two Sub Copy engines.
- */
- for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
- unsigned int instance = quad * 2 + 1;
- intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
- _BCS(instance));
-
- if (mask & info->engine_mask) {
- gt_dbg(gt, "bcs%u fused off\n", instance);
- gt_dbg(gt, "bcs%u fused off\n", instance + 1);
-
- info->engine_mask &= ~mask;
- }
- }
-}
-
/*
* Determine which engines are fused off in our particular hardware.
* Note that we have a catch-22 situation where we need to be able to access
@@ -889,7 +856,6 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
engine_mask_apply_media_fuses(gt);
engine_mask_apply_compute_fuses(gt);
- engine_mask_apply_copy_fuses(gt);
/*
* The only use of the GSC CS is to load and communicate with the GSC
@@ -908,6 +874,23 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
info->engine_mask &= ~BIT(GSC0);
}
+ /*
+ * Do not create the command streamer for CCS slices beyond the first.
+ * All the workloads submitted to the first engine will be shared among
+ * all the slices.
+ *
+ * Once the user is allowed to customize the CCS mode, this check
+ * needs to be removed.
+ */
+ if (IS_DG2(gt->i915)) {
+ u8 first_ccs = __ffs(CCS_MASK(gt));
+
+ /* Mask off all the CCS engines */
+ info->engine_mask &= ~GENMASK(CCS3, CCS0);
+ /* Put back in the first CCS engine */
+ info->engine_mask |= BIT(_CCS(first_ccs));
+ }
+
return info->engine_mask;
}
@@ -1193,7 +1176,6 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
- GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
regs = xehp_regs;
num = ARRAY_SIZE(xehp_regs);
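Both the FIRST_RENDER_COMPUTE rework and the DG2 CCS masking above rely on the same idiom: the lowest set bit of an engine mask names the first surviving instance. A tiny standalone illustration, with __builtin_ctzl standing in for the kernel's __ffs():

#include <stdio.h>

int main(void)
{
	unsigned long rcs_mask = 0x0; /* render fused off entirely */
	unsigned long ccs_mask = 0xc; /* only CCS2 and CCS3 survive */
	unsigned long combined = rcs_mask | ccs_mask;

	/* like __ffs(), ctzl is undefined for 0, hence the guard */
	if (combined)
		printf("first render/compute instance: %d\n",
		       __builtin_ctzl(combined)); /* prints 2 */
	return 0;
}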
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 96bdb93a948d..fb7bff27b45a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -279,9 +279,6 @@ static int __engine_park(struct intel_wakeref *wf)
intel_engine_park_heartbeat(engine);
intel_breadcrumbs_park(engine->breadcrumbs);
- /* Must be reset upon idling, or we may miss the busy wakeup. */
- GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
-
if (engine->park)
engine->park(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 960e6be2042f..ba55c059063d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -586,7 +586,7 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_RCS_REG_STATE BIT(9)
#define I915_ENGINE_HAS_EU_PRIORITY BIT(10)
#define I915_ENGINE_FIRST_RENDER_COMPUTE BIT(11)
-#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT BIT(12)
+#define I915_ENGINE_USES_WA_HOLD_SWITCHOUT BIT(12)
unsigned int flags;
/*
@@ -696,10 +696,12 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
}
/* Wa_14014475959:dg2 */
+/* Wa_16019325821 */
+/* Wa_14019159160 */
static inline bool
-intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
+intel_engine_uses_wa_hold_switchout(struct intel_engine_cs *engine)
{
- return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ return engine->flags & I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
}
#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 42aade0faf2d..21829439e686 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -493,7 +493,7 @@ __execlists_schedule_in(struct i915_request *rq)
/* Use a fixed tag for OA and friends */
GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
ce->lrc.ccid = ce->tag;
- } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
/* We don't need a strict matching tag, just different values */
unsigned int tag = ffs(READ_ONCE(engine->context_tag));
@@ -613,7 +613,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
intel_engine_add_retire(engine, ce->timeline);
ccid = ce->lrc.ccid;
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
ccid >>= XEHP_SW_CTX_ID_SHIFT - 32;
ccid &= XEHP_MAX_CONTEXT_HW_ID;
} else {
@@ -1907,7 +1907,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
head, upper_32_bits(csb), lower_32_bits(csb));
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
promote = xehp_csb_parse(csb);
else if (GRAPHICS_VER(engine->i915) >= 12)
promote = gen12_csb_parse(csb);
@@ -2898,7 +2898,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine)
drm_err(&engine->i915->drm,
"engine '%s' resumed still in error: %08x\n",
engine->name, status);
- __intel_gt_reset(engine->gt, engine->mask);
+ intel_gt_reset_engine(engine);
}
/*
@@ -3272,6 +3272,9 @@ static void execlists_park(struct intel_engine_cs *engine)
{
cancel_timer(&engine->execlists.timer);
cancel_timer(&engine->execlists.preempt);
+
+ /* Reset upon idling, or we may delay the busy wakeup. */
+ WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
}
static void add_to_engine(struct i915_request *rq)
@@ -3479,7 +3482,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
}
}
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
if (intel_engine_has_preemption(engine))
engine->emit_bb_start = xehp_emit_bb_start;
else
@@ -3582,7 +3585,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
if (GRAPHICS_VER(engine->i915) >= 11 &&
- GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) {
+ GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 55)) {
execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index ec1cbe229f0e..0d0a0dc9f610 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -231,11 +231,8 @@ static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
- struct intel_guc *guc = &gt->uc.guc;
-
- intel_guc_invalidate_tlb_guc(guc);
- }
+ with_intel_runtime_pm_if_active(uncore->rpm, wakeref)
+ intel_guc_invalidate_tlb_guc(gt_to_guc(gt));
}
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -246,7 +243,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
- if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
+ if (intel_guc_tlb_invalidation_is_available(gt_to_guc(gt)))
guc_ggtt_ct_invalidate(gt);
else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index 6d440de8ba01..1e925c75fb08 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -103,19 +103,6 @@ static const struct gsc_def gsc_def_dg1[] = {
}
};
-static const struct gsc_def gsc_def_xehpsdv[] = {
- {
- /* HECI1 not enabled on the device. */
- },
- {
- .name = "mei-gscfi",
- .bar = DG1_GSC_HECI2_BASE,
- .bar_size = GSC_BAR_LENGTH,
- .use_polling = true,
- .slow_firmware = true,
- }
-};
-
static const struct gsc_def gsc_def_dg2[] = {
{
.name = "mei-gsc",
@@ -188,8 +175,6 @@ static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
if (IS_DG1(i915)) {
def = &gsc_def_dg1[intf_id];
- } else if (IS_XEHPSDV(i915)) {
- def = &gsc_def_xehpsdv[intf_id];
} else if (IS_DG2(i915)) {
def = &gsc_def_dg2[intf_id];
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index a425db5ed3a2..626b166e67ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -278,7 +278,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
intel_uncore_posting_read(uncore,
XELPMP_RING_FAULT_REG);
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
RING_FAULT_VALID, 0);
intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
@@ -403,7 +403,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
/* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_check_faults(gt);
else if (GRAPHICS_VER(i915) >= 8)
gen8_check_faults(gt);
@@ -832,7 +832,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
/* Scrub all HW state upon release */
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
}
void intel_gt_driver_release(struct intel_gt *gt)
@@ -1024,6 +1024,12 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
return I915_MAP_WC;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71));
+}
+
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 608f5c872928..b5e114d284ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -82,17 +82,18 @@ struct drm_printer;
##__VA_ARGS__); \
} while (0)
-#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
- IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
- engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
-
static inline bool gt_is_root(struct intel_gt *gt)
{
return !gt->info.id;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt);
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);
+#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
+ intel_gt_needs_wa_16018031267(engine->gt) && \
+ engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -123,6 +124,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
return guc_to_gt(guc)->i915;
}
+static inline struct intel_guc *gt_to_guc(struct intel_gt *gt)
+{
+ return &gt->uc.guc;
+}
+
void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
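The new gt_to_guc() is the inverse of the guc_to_gt()/guc_to_i915() helpers already in this header: the GuC sits embedded inside the GT through intel_uc, so one direction is a member access and the other a container_of(). A self-contained round-trip sketch with simplified stand-in types (not the driver's real definitions):

#include <stddef.h>
#include <stdio.h>

struct intel_guc { int dummy; };
struct intel_uc { struct intel_guc guc; };
struct intel_gt { struct intel_uc uc; };

/* simplified container_of(), without the kernel's type checking */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct intel_guc *gt_to_guc(struct intel_gt *gt)
{
	return &gt->uc.guc;
}

static struct intel_gt *guc_to_gt(struct intel_guc *guc)
{
	return container_of(guc, struct intel_gt, uc.guc);
}

int main(void)
{
	struct intel_gt gt;

	printf("round trip ok: %d\n", guc_to_gt(gt_to_guc(&gt)) == &gt);
	return 0;
}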
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
new file mode 100644
index 000000000000..044219c5960a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
+#include "intel_gt_regs.h"
+
+void intel_gt_apply_ccs_mode(struct intel_gt *gt)
+{
+ int cslice;
+ u32 mode = 0;
+ int first_ccs = __ffs(CCS_MASK(gt));
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /* Build the value for the fixed CCS load balancing */
+ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
+ if (CCS_MASK(gt) & BIT(cslice))
+ /*
+ * If available, assign the cslice
+ * to the first available engine...
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
+
+ else
+ /*
+ * ... otherwise, mark the cslice as
+ * unavailable so that no CCS dispatches here
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice,
+ XEHP_CCS_MODE_CSLICE_MASK);
+ }
+
+ intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
new file mode 100644
index 000000000000..9e5549caeb26
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CCS_MODE_H__
+#define __INTEL_GT_CCS_MODE_H__
+
+struct intel_gt;
+
+void intel_gt_apply_ccs_mode(struct intel_gt *gt);
+
+#endif /* __INTEL_GT_CCS_MODE_H__ */
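To make the XEHP_CCS_MODE encoding concrete, here is a standalone restatement of the intel_gt_apply_ccs_mode() loop for a hypothetical DG2 part where only CCS0 and CCS2 survive fusing (mask 0b0101). Each cslice owns a 3-bit field; enabled cslices point at the first CCS engine, fused-off ones are parked at the all-ones sentinel:

#include <stdio.h>

#define CSLICE_MASK 0x7 /* XEHP_CCS_MODE_CSLICE_MASK */
#define CSLICE_WIDTH 3  /* ilog2(CSLICE_MASK + 1) */
#define CSLICE(cslice, ccs) ((ccs) << ((cslice) * CSLICE_WIDTH))

int main(void)
{
	unsigned int ccs_mask = 0x5; /* CCS0 and CCS2 present */
	unsigned int first_ccs = __builtin_ctz(ccs_mask);
	unsigned int mode = 0;
	int cslice;

	for (cslice = 0; cslice < 4; cslice++)
		mode |= (ccs_mask & (1u << cslice)) ?
			CSLICE(cslice, first_ccs) :
			CSLICE(cslice, CSLICE_MASK);

	printf("CCS_MODE = 0x%08x\n", mode); /* prints 0x00000e38 */
	return 0;
}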
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 77fb57223465..ad4c51f18d3a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -68,9 +68,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
struct intel_gt *media_gt = gt->i915->media_gt;
if (instance == OTHER_GUC_INSTANCE)
- return guc_irq_handler(&gt->uc.guc, iir);
+ return guc_irq_handler(gt_to_guc(gt), iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
- return guc_irq_handler(&media_gt->uc.guc, iir);
+ return guc_irq_handler(gt_to_guc(media_gt), iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
@@ -442,7 +442,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
iir = raw_reg_read(regs, GEN8_GT_IIR(2));
if (likely(iir)) {
gen6_rps_irq_handler(&gt->rps, iir);
- guc_irq_handler(&gt->uc.guc, iir >> 16);
+ guc_irq_handler(gt_to_guc(gt), iir >> 16);
raw_reg_write(regs, GEN8_GT_IIR(2), iir);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index e253750a51c5..b8912bd6c08e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -57,51 +57,18 @@ static const struct intel_mmio_range icl_l3bank_steering_table[] = {
* are of a "GAM" subclass that has special rules. Thus we use a separate
* GAM table farther down for those.
*/
-static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
+static const struct intel_mmio_range dg2_mslice_steering_table[] = {
{ 0x00DD00, 0x00DDFF },
{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
{},
};
-static const struct intel_mmio_range xehpsdv_gam_steering_table[] = {
- { 0x004000, 0x004AFF },
- { 0x00C800, 0x00CFFF },
- {},
-};
-
-static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
- { 0x00B000, 0x00B0FF },
- { 0x00D800, 0x00D8FF },
- {},
-};
-
static const struct intel_mmio_range dg2_lncf_steering_table[] = {
{ 0x00B000, 0x00B0FF },
{ 0x00D880, 0x00D8FF },
{},
};
-/*
- * We have several types of MCR registers on PVC where steering to (0,0)
- * will always provide us with a non-terminated value. We'll stick them
- * all in the same table for simplicity.
- */
-static const struct intel_mmio_range pvc_instance0_steering_table[] = {
- { 0x004000, 0x004AFF }, /* HALF-BSLICE */
- { 0x008800, 0x00887F }, /* CC */
- { 0x008A80, 0x008AFF }, /* TILEPSMI */
- { 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
- { 0x00B100, 0x00B3FF }, /* L3BANK */
- { 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
- { 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
- { 0x00DD00, 0x00DDFF }, /* BSLICE */
- { 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
- { 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
- { 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
- { 0x024180, 0x0241FF }, /* HALF-BSLICE */
- {},
-};
-
static const struct intel_mmio_range xelpg_instance0_steering_table[] = {
{ 0x000B00, 0x000BFF }, /* SQIDI */
{ 0x001000, 0x001FFF }, /* SQIDI */
@@ -185,22 +152,16 @@ void intel_gt_mcr_init(struct intel_gt *gt)
gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table;
gt->steering_table[L3BANK] = xelpg_l3bank_steering_table;
gt->steering_table[DSS] = xelpg_dss_steering_table;
- } else if (IS_PONTEVECCHIO(i915)) {
- gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
} else if (IS_DG2(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+ gt->steering_table[MSLICE] = dg2_mslice_steering_table;
gt->steering_table[LNCF] = dg2_lncf_steering_table;
/*
* No need to hook up the GAM table since it has a dedicated
* steering control register on DG2 and can use implicit
* steering.
*/
- } else if (IS_XEHPSDV(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
- gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
- gt->steering_table[GAM] = xehpsdv_gam_steering_table;
} else if (GRAPHICS_VER(i915) >= 11 &&
- GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) {
gt->steering_table[L3BANK] = icl_l3bank_steering_table;
gt->info.l3bank_mask =
~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
@@ -821,8 +782,6 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
for (int i = 0; i < NUM_STEERING_TYPES; i++)
if (gt->steering_table[i])
report_steering_type(p, gt, i, dump_table);
- } else if (IS_PONTEVECCHIO(gt->i915)) {
- report_steering_type(p, gt, INSTANCE0, dump_table);
} else if (HAS_MSLICE_STEERING(gt->i915)) {
report_steering_type(p, gt, MSLICE, dump_table);
report_steering_type(p, gt, LNCF, dump_table);
@@ -842,10 +801,7 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
unsigned int *group, unsigned int *instance)
{
- if (IS_PONTEVECCHIO(gt->i915)) {
- *group = dss / GEN_DSS_PER_CSLICE;
- *instance = dss % GEN_DSS_PER_CSLICE;
- } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
*group = dss / GEN_DSS_PER_GSLICE;
*instance = dss % GEN_DSS_PER_GSLICE;
} else {
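The surviving Xe_HP branch above derives a steering (group, instance) pair from a flat DSS index by the per-gslice stride. A quick illustration, assuming the customary four DSS per gslice behind the driver's GEN_DSS_PER_GSLICE:

#include <stdio.h>

#define GEN_DSS_PER_GSLICE 4 /* assumed stride, for illustration only */

int main(void)
{
	unsigned int dss;

	for (dss = 0; dss < 8; dss++)
		printf("dss%u -> group %u, instance %u\n",
		       dss, dss / GEN_DSS_PER_GSLICE,
		       dss % GEN_DSS_PER_GSLICE);
	return 0;
}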
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
index 01ac565a56a4..a67a4c35a4fa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
@@ -54,7 +54,7 @@ int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
* the topology, so we lookup the DSS ID directly in "slice 0."
*/
#define _HAS_SS(ss_, gt_, group_, instance_) ( \
- GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 50) ? \
+ GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 55) ? \
intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \
intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_))
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 220ac4f92edf..c08fdb65cc69 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -159,7 +159,7 @@ static bool reset_engines(struct intel_gt *gt)
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
return false;
- return __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ return intel_gt_reset_all_engines(gt) == 0;
}
static void gt_sanitize(struct intel_gt *gt, bool force)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 7114c116e928..4fcba42cfe34 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -392,10 +392,6 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_puts(p, "no P-state info available\n");
}
- drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
- drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
- drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
-
intel_runtime_pm_put(uncore->rpm, wakeref);
}
@@ -538,7 +534,7 @@ static bool rps_eval(void *data)
{
struct intel_gt *gt = data;
- if (intel_guc_slpc_is_used(&gt->uc.guc))
+ if (intel_guc_slpc_is_used(gt_to_guc(gt)))
return false;
else
return HAS_RPS(gt->i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 50962cfd1353..e42b3a5d4e63 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -718,44 +718,11 @@
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS REG_BIT(20)
-#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
#define HSUNIT_CLKGATE_DIS REG_BIT(8)
#define VSUNIT_CLKGATE_DIS REG_BIT(3)
-#define UNSLCGCTL9440 _MMIO(0x9440)
-#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
-#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
-#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
-#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
-
-#define UNSLCGCTL9444 _MMIO(0x9444)
-#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
-#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
-#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
-#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
-#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
-#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
-#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
-#define LTCDD_CLKGATE_DIS REG_BIT(10)
-
#define GEN11_SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define XEHP_SLICE_UNIT_LEVEL_CLKGATE MCR_REG(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
@@ -765,9 +732,6 @@
#define L3_CLKGATE_DIS REG_BIT(16)
#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
-#define SCCGCTL94DC MCR_REG(0x94dc)
-#define CG3DDISURB REG_BIT(14)
-
#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
@@ -989,10 +953,6 @@
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1 << 19)
-#define XEHPC_LNCFMISCCFGREG0 MCR_REG(0xb01c)
-#define XEHPC_HOSTCACHEEN REG_BIT(1)
-#define XEHPC_OVRLSCCC REG_BIT(0)
-
#define GEN7_L3CNTLREG2 _MMIO(0xb020)
/* MOCS (Memory Object Control State) registers */
@@ -1046,20 +1006,9 @@
#define XEHP_L3SQCREG5 MCR_REG(0xb158)
#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
-#define MLTICTXCTL MCR_REG(0xb170)
-#define TDONRENDER REG_BIT(2)
-
#define XEHP_L3SCQREG7 MCR_REG(0xb188)
#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
-#define XEHPC_L3SCRUB MCR_REG(0xb18c)
-#define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12)
-#define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0)
-#define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
-
-#define L3SQCREG1_CCS0 MCR_REG(0xb200)
-#define FLUSHALLNONCOH REG_BIT(5)
-
#define GEN11_GLBLINVL _MMIO(0xb404)
#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5)
#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5)
@@ -1109,7 +1058,6 @@
#define XEHP_COMPCTX_TLB_INV_CR MCR_REG(0xcf04)
#define XELPMP_GSC_TLB_INV_CR _MMIO(0xcf04) /* media GT only */
-#define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28)
#define RENDER_MOD_CTRL MCR_REG(0xcf2c)
#define COMP_MOD_CTRL MCR_REG(0xcf30)
#define XELPMP_GSC_MOD_CTRL _MMIO(0xcf30) /* media GT only */
@@ -1185,7 +1133,6 @@
#define EU_PERF_CNTL4 PERF_REG(0xe45c)
#define GEN9_ROW_CHICKEN4 MCR_REG(0xe48c)
-#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
#define XEHP_DIS_BBL_SYSPIPE REG_BIT(11)
#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
@@ -1202,7 +1149,6 @@
#define FLOW_CONTROL_ENABLE REG_BIT(15)
#define UGM_BACKUP_MODE REG_BIT(13)
#define MDQ_ARBITRATION_MODE REG_BIT(12)
-#define SYSTOLIC_DOP_CLOCK_GATING_DIS REG_BIT(10)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8)
#define STALL_DOP_GATING_DISABLE REG_BIT(5)
#define THROTTLE_12_5 REG_GENMASK(4, 2)
@@ -1215,6 +1161,7 @@
#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12)
#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define XELPG_DISABLE_TDL_SVHS_GATING REG_BIT(1)
#define GEN12_DISABLE_DOP_GATING REG_BIT(0)
#define RT_CTRL MCR_REG(0xe530)
@@ -1477,8 +1424,14 @@
#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GEN12_RCU_MODE _MMIO(0x14800)
+#define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
#define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+#define XEHP_CCS_MODE _MMIO(0x14804)
+#define XEHP_CCS_MODE_CSLICE_MASK REG_GENMASK(2, 0) /* CCS0-3 + rsvd */
+#define XEHP_CCS_MODE_CSLICE_WIDTH ilog2(XEHP_CCS_MODE_CSLICE_MASK + 1)
+#define XEHP_CCS_MODE_CSLICE(cslice, ccs) (ccs << (cslice * XEHP_CCS_MODE_CSLICE_WIDTH))
+
#define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
@@ -1679,11 +1632,6 @@
#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
-#define GT0_PACKAGE_ENERGY_STATUS _MMIO(0x250004)
-#define GT0_PACKAGE_RAPL_LIMIT _MMIO(0x250008)
-#define GT0_PACKAGE_POWER_SKU_UNIT _MMIO(0x250068)
-#define GT0_PLATFORM_ENERGY_STATUS _MMIO(0x25006c)
-
/*
* Standalone Media's non-engine GT registers are located at their regular GT
* offsets plus 0x380000. This extra offset is stored inside the intel_uncore
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index c0b202223940..d7784650e4d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -442,7 +442,7 @@ static ssize_t slpc_ignore_eff_freq_show(struct kobject *kobj,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
return sysfs_emit(buff, "%u\n", slpc->ignore_eff_freq);
}
@@ -452,7 +452,7 @@ static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
int err;
u32 val;
@@ -573,7 +573,6 @@ static ssize_t media_freq_factor_show(struct kobject *kobj,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
intel_wakeref_t wakeref;
u32 mode;
@@ -581,20 +580,12 @@ static ssize_t media_freq_factor_show(struct kobject *kobj,
* Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by
* GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1
*/
- if (IS_XEHPSDV(gt->i915) &&
- slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) {
- /*
- * For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain
- * the media_ratio_mode, just return the cached media ratio
- */
- mode = slpc->media_ratio_mode;
- } else {
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
- mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
- SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
- SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
- }
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
+
+ mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
}
@@ -604,7 +595,7 @@ static ssize_t media_freq_factor_store(struct kobject *kobj,
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
u32 factor, mode;
int err;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 7811a8c9da06..30b128b1fde7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -680,7 +680,7 @@ void setup_private_pat(struct intel_gt *gt)
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
xelpg_setup_private_ppat(gt);
- else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_setup_private_ppat(gt);
else if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7c367ba8d9dc..b387146ede98 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -546,47 +546,6 @@ static const u8 gen12_rcs_offsets[] = {
END
};
-static const u8 xehp_rcs_offsets[] = {
- NOP(1),
- LRI(13, POSTED),
- REG16(0x244),
- REG(0x034),
- REG(0x030),
- REG(0x038),
- REG(0x03c),
- REG(0x168),
- REG(0x140),
- REG(0x110),
- REG(0x1c0),
- REG(0x1c4),
- REG(0x1c8),
- REG(0x180),
- REG16(0x2b4),
-
- NOP(5),
- LRI(9, POSTED),
- REG16(0x3a8),
- REG16(0x28c),
- REG16(0x288),
- REG16(0x284),
- REG16(0x280),
- REG16(0x27c),
- REG16(0x278),
- REG16(0x274),
- REG16(0x270),
-
- LRI(3, POSTED),
- REG(0x1b0),
- REG16(0x5a8),
- REG16(0x5ac),
-
- NOP(6),
- LRI(1, 0),
- REG(0x0c8),
-
- END
-};
-
static const u8 dg2_rcs_offsets[] = {
NOP(1),
LRI(15, POSTED),
@@ -695,8 +654,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
return mtl_rcs_offsets;
else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return dg2_rcs_offsets;
- else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
- return xehp_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 11)
@@ -719,7 +676,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x70;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x60;
@@ -733,7 +690,7 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x80;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x70;
@@ -748,7 +705,7 @@ static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x84;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x74;
@@ -795,7 +752,7 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
/*
* Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
* simply to match the RCS context image layout.
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 576e5ef0289b..6f7af4077135 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -35,9 +35,9 @@ static bool engine_supports_migration(struct intel_engine_cs *engine)
return true;
}
-static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
- struct i915_page_table *pt,
- void *data)
+static void xehp_toggle_pdes(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data)
{
struct insert_pte_data *d = data;
@@ -52,9 +52,9 @@ static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
d->offset += SZ_2M;
}
-static void xehpsdv_insert_pte(struct i915_address_space *vm,
- struct i915_page_table *pt,
- void *data)
+static void xehp_insert_pte(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data)
{
struct insert_pte_data *d = data;
@@ -120,7 +120,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
* 512 entry layout using 4K GTT pages. The other two windows just map
* lmem pages and must use the new compact 32 entry layout using 64K GTT
* pages, which ensures we can address any lmem object that the user
- * throws at us. We then also use the xehpsdv_toggle_pdes as a way of
+ * throws at us. We then also use the xehp_toggle_pdes as a way of
* just toggling the PDE bit(GEN12_PDE_64K) for us, to enable the
* compact layout for each of these page-tables, that fall within the
* [CHUNK_SIZE, 3 * CHUNK_SIZE) range.
@@ -209,12 +209,12 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
/* Now allow the GPU to rewrite the PTE via its own ppGTT */
if (HAS_64K_PAGES(gt->i915)) {
vm->vm.foreach(&vm->vm, base, d.offset - base,
- xehpsdv_insert_pte, &d);
+ xehp_insert_pte, &d);
d.offset = base + CHUNK_SZ;
vm->vm.foreach(&vm->vm,
d.offset,
2 * CHUNK_SZ,
- xehpsdv_toggle_pdes, &d);
+ xehp_toggle_pdes, &d);
} else {
vm->vm.foreach(&vm->vm, base, d.offset - base,
insert_pte, &d);
@@ -925,7 +925,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
ring_sz = XY_FAST_COLOR_BLT_DW;
else if (ver >= 8)
ring_sz = 8;
@@ -936,7 +936,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
(XY_FAST_COLOR_BLT_DW - 2);
*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 25c1023eb5f9..d791d63d49b4 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -53,7 +53,6 @@ struct drm_i915_mocs_table {
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
-#define PVC_NUM_MOCS_ENTRIES 3
#define MTL_NUM_MOCS_ENTRIES 16
/* (e)LLC caching options */
@@ -367,31 +366,6 @@ static const struct drm_i915_mocs_entry gen12_mocs_table[] = {
L3_3_WB),
};
-static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = {
- /* wa_1608975824 */
- MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)),
-
- /* UC - Coherent; GO:L3 */
- MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)),
- /* UC - Coherent; GO:Memory */
- MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
- /* UC - Non-Coherent; GO:Memory */
- MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)),
- /* UC - Non-Coherent; GO:L3 */
- MOCS_ENTRY(4, 0, L3_1_UC),
-
- /* WB */
- MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)),
-
- /* HW Reserved - SW program but never use. */
- MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)),
- MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)),
- MOCS_ENTRY(60, 0, L3_1_UC),
- MOCS_ENTRY(61, 0, L3_1_UC),
- MOCS_ENTRY(62, 0, L3_1_UC),
- MOCS_ENTRY(63, 0, L3_1_UC),
-};
-
static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
/* UC - Coherent; GO:L3 */
MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
@@ -404,17 +378,6 @@ static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
-static const struct drm_i915_mocs_entry pvc_mocs_table[] = {
- /* Error */
- MOCS_ENTRY(0, 0, L3_3_WB),
-
- /* UC */
- MOCS_ENTRY(1, 0, L3_1_UC),
-
- /* WB */
- MOCS_ENTRY(2, 0, L3_3_WB),
-};
-
static const struct drm_i915_mocs_entry mtl_mocs_table[] = {
/* Error - Reserved for Non-Use */
MOCS_ENTRY(0,
@@ -501,25 +464,12 @@ static unsigned int get_mocs_settings(struct drm_i915_private *i915,
table->n_entries = MTL_NUM_MOCS_ENTRIES;
table->uc_index = 9;
table->unused_entries_index = 1;
- } else if (IS_PONTEVECCHIO(i915)) {
- table->size = ARRAY_SIZE(pvc_mocs_table);
- table->table = pvc_mocs_table;
- table->n_entries = PVC_NUM_MOCS_ENTRIES;
- table->uc_index = 1;
- table->wb_index = 2;
- table->unused_entries_index = 2;
} else if (IS_DG2(i915)) {
table->size = ARRAY_SIZE(dg2_mocs_table);
table->table = dg2_mocs_table;
table->uc_index = 1;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->unused_entries_index = 3;
- } else if (IS_XEHPSDV(i915)) {
- table->size = ARRAY_SIZE(xehpsdv_mocs_table);
- table->table = xehpsdv_mocs_table;
- table->uc_index = 2;
- table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- table->unused_entries_index = 5;
} else if (IS_DG1(i915)) {
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
@@ -670,7 +620,7 @@ static void init_l3cc_table(struct intel_gt *gt,
intel_gt_mcr_lock(gt, &flags);
for_each_l3cc(l3cc, table, i)
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55))
intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc);
else
intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc);
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 8f4b3c8af09c..c864d101faf9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -109,7 +109,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
* thus allowing GuC to control RC6 entry/exit fully instead.
* We will not set the HW ENABLE and EI bits
*/
- if (!intel_guc_rc_enable(&gt->uc.guc))
+ if (!intel_guc_rc_enable(gt_to_guc(gt)))
rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE;
else
rc6->ctl_enable =
@@ -569,7 +569,7 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
struct intel_gt *gt = rc6_to_gt(rc6);
/* Take control of RC6 back from GuC */
- intel_guc_rc_disable(&gt->uc.guc);
+ intel_guc_rc_disable(gt_to_guc(gt));
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (GRAPHICS_VER(i915) >= 9)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c8e9aa41fdea..6161f7a3ff70 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -764,7 +764,7 @@ wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
HECI_H_GS1_ER_PREP, 0);
}
-int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+static int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
@@ -879,8 +879,17 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- /* For GuC mode, ensure submission is disabled before stopping ring */
- intel_uc_reset_prepare(&gt->uc);
+ /*
+ * For GuC mode with submission enabled, ensure submission
+ * is disabled before stopping the ring.
+ *
+ * For GuC mode with submission disabled, do not sanitize GuC here;
+ * that is done after the engine reset instead. reset_prepare()
+ * is followed by an engine reset which, in this mode, requires GuC
+ * to process any CSB FIFO entries generated by the resets.
+ */
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ intel_uc_reset_prepare(&gt->uc);
for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
@@ -978,7 +987,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
/* Even if the GPU reset fails, it should still stop the engines */
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
for_each_engine(engine, gt, id)
engine->submit_request = nop_submit_request;
@@ -1089,7 +1098,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
/* We must reset pending GPU events before restoring our submission */
ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ ok = intel_gt_reset_all_engines(gt) == 0;
if (!ok) {
/*
* Warn CI about the unrecoverable wedged condition.
@@ -1133,10 +1142,10 @@ static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
int err, i;
- err = __intel_gt_reset(gt, ALL_ENGINES);
+ err = intel_gt_reset_all_engines(gt);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
- err = __intel_gt_reset(gt, ALL_ENGINES);
+ err = intel_gt_reset_all_engines(gt);
}
if (err)
return err;
@@ -1227,6 +1236,9 @@ void intel_gt_reset(struct intel_gt *gt,
intel_overlay_reset(gt->i915);
+ /* sanitize uC after engine reset */
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ intel_uc_reset_prepare(&gt->uc);
/*
* Next we need to restore the context, but we don't use those
* yet either...
@@ -1270,7 +1282,30 @@ error:
goto finish;
}
-static int intel_gt_reset_engine(struct intel_engine_cs *engine)
+/**
+ * intel_gt_reset_all_engines() - Reset all engines in the given gt.
+ * @gt: the GT to reset all engines for.
+ *
+ * This function resets all engines within the given gt.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int intel_gt_reset_all_engines(struct intel_gt *gt)
+{
+ return __intel_gt_reset(gt, ALL_ENGINES);
+}
+
+/**
+ * intel_gt_reset_engine() - Reset a specific engine within a gt.
+ * @engine: engine to be reset.
+ *
+ * This function resets the specified engine within a gt.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
return __intel_gt_reset(engine->gt, engine->mask);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index f615b30b81c5..c00de353075c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -54,7 +54,8 @@ int intel_gt_terminally_wedged(struct intel_gt *gt);
void intel_gt_set_wedged_on_init(struct intel_gt *gt);
void intel_gt_set_wedged_on_fini(struct intel_gt *gt);
-int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
+int intel_gt_reset_engine(struct intel_engine_cs *engine);
+int intel_gt_reset_all_engines(struct intel_gt *gt);
int intel_reset_guc(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 4feef874e6d6..c9cb2a391942 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -52,7 +52,7 @@ static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- return &gt->uc.guc.slpc;
+ return &gt_to_guc(gt)->slpc;
}
static bool rps_uses_slpc(struct intel_rps *rps)
@@ -1013,6 +1013,10 @@ void intel_rps_boost(struct i915_request *rq)
if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
+ /* Waitboost is not needed for contexts marked with a Freq hint */
+ if (test_bit(CONTEXT_LOW_LATENCY, &rq->context->flags))
+ return;
+
/* Serializes with i915_request_retire() */
if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
@@ -1086,11 +1090,7 @@ static u32 intel_rps_read_state_cap(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
- if (IS_PONTEVECCHIO(i915))
- return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
- else if (IS_XEHPSDV(i915))
- return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
- else if (IS_GEN9_LP(i915))
+ if (IS_GEN9_LP(i915))
return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
else
return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 6a3246240e81..c8fadf58d836 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -214,13 +214,8 @@ static void xehp_sseu_info_init(struct intel_gt *gt)
int num_compute_regs, num_geometry_regs;
int eu;
- if (IS_PONTEVECCHIO(gt->i915)) {
- num_geometry_regs = 0;
- num_compute_regs = 2;
- } else {
- num_geometry_regs = 1;
- num_compute_regs = 1;
- }
+ num_geometry_regs = 1;
+ num_compute_regs = 1;
/*
* The concept of slice has been removed in Xe_HP. To be compatible
@@ -642,7 +637,7 @@ void intel_sseu_info_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_sseu_info_init(gt);
else if (GRAPHICS_VER(i915) >= 12)
gen12_sseu_info_init(gt);
@@ -851,7 +846,7 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
{
if (sseu->max_slices == 0)
drm_printf(p, "Unavailable\n");
- else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
sseu_print_xehp_topology(sseu, p);
else
sseu_print_hsw_topology(sseu, p);
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
index 4bb13d1890e3..756e9ebbc725 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.c
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -132,7 +132,7 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index d67d44611c28..40e79f0dc257 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -10,12 +10,15 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
+#include "display/intel_fbc_regs.h"
+
/**
* DOC: Hardware workarounds
*
@@ -51,7 +54,8 @@
* registers belonging to BCS, VCS or VECS should be implemented in
* xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
* engine's MMIO range but that are part of the common RCS/CCS reset domain
- * should be implemented in general_render_compute_wa_init().
+ should be implemented in general_render_compute_wa_init(). Settings
+ related to CCS load balancing should be added in ccs_engine_wa_mode().
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
@@ -258,12 +262,6 @@ wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
}
static void
-wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
-{
- wa_mcr_write_clr_set(wal, reg, ~0, set);
-}
-
-static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
wa_write_clr_set(wal, reg, set, set);
@@ -918,12 +916,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_ctx_workarounds_init(engine, wal);
- else if (IS_PONTEVECCHIO(i915))
- ; /* noop; none at this time */
else if (IS_DG2(i915))
dg2_ctx_workarounds_init(engine, wal);
- else if (IS_XEHPSDV(i915))
- ; /* noop; none at this time */
else if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 12)
@@ -1350,9 +1344,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
gt->steering_table[MSLICE] = NULL;
}
- if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
- gt->steering_table[GAM] = NULL;
-
slice = __ffs(slice_mask);
subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
GEN_DSS_PER_GSLICE;
@@ -1380,20 +1371,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- unsigned int dss;
-
- /*
- * Setup implicit steering for COMPUTE and DSS ranges to the first
- * non-fused-off DSS. All other types of MCR registers will be
- * explicitly steered.
- */
- dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
- __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
-}
-
-static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
@@ -1520,76 +1497,6 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- xehp_init_mcr(gt, wal);
-
- /* Wa_1409757795:xehpsdv */
- wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
-
- /* Wa_18011725039:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
- wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
- wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
- }
-
- /* Wa_16011155590:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
- TSGUNIT_CLKGATE_DIS);
-
- /* Wa_14011780169:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
- wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
- GAMTLBVDBOX7_CLKGATE_DIS |
- GAMTLBVDBOX6_CLKGATE_DIS |
- GAMTLBVDBOX5_CLKGATE_DIS |
- GAMTLBVDBOX4_CLKGATE_DIS |
- GAMTLBVDBOX3_CLKGATE_DIS |
- GAMTLBVDBOX2_CLKGATE_DIS |
- GAMTLBVDBOX1_CLKGATE_DIS |
- GAMTLBVDBOX0_CLKGATE_DIS |
- GAMTLBKCR_CLKGATE_DIS |
- GAMTLBGUC_CLKGATE_DIS |
- GAMTLBBLT_CLKGATE_DIS);
- wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
- GAMTLBGFXA1_CLKGATE_DIS |
- GAMTLBCOMPA0_CLKGATE_DIS |
- GAMTLBCOMPA1_CLKGATE_DIS |
- GAMTLBCOMPB0_CLKGATE_DIS |
- GAMTLBCOMPB1_CLKGATE_DIS |
- GAMTLBCOMPC0_CLKGATE_DIS |
- GAMTLBCOMPC1_CLKGATE_DIS |
- GAMTLBCOMPD0_CLKGATE_DIS |
- GAMTLBCOMPD1_CLKGATE_DIS |
- GAMTLBMERT_CLKGATE_DIS |
- GAMTLBVEBOX3_CLKGATE_DIS |
- GAMTLBVEBOX2_CLKGATE_DIS |
- GAMTLBVEBOX1_CLKGATE_DIS |
- GAMTLBVEBOX0_CLKGATE_DIS);
- }
-
- /* Wa_16012725990:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
-
- /* Wa_14011060649:xehpsdv */
- wa_14011060649(gt, wal);
-
- /* Wa_14012362059:xehpsdv */
- wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
-
- /* Wa_14014368820:xehpsdv */
- wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
- INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
-
- /* Wa_14010670810:xehpsdv */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
-}
-
-static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
xehp_init_mcr(gt, wal);
@@ -1632,27 +1539,10 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- pvc_init_mcr(gt, wal);
-
- /* Wa_14015795083 */
- wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
-
- /* Wa_18018781329 */
- wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
-
- /* Wa_16016694945 */
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
-}
-
-static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Wa_14018575942 / Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@@ -1724,12 +1614,6 @@ static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
}
- if (IS_PONTEVECCHIO(gt->i915)) {
- wa_mcr_write(wal, XEHPC_L3SCRUB,
- SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
- }
-
if (IS_DG2(gt->i915)) {
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
@@ -1754,12 +1638,8 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_gt_workarounds_init(gt, wal);
- else if (IS_PONTEVECCHIO(i915))
- pvc_gt_workarounds_init(gt, wal);
else if (IS_DG2(i915))
dg2_gt_workarounds_init(gt, wal);
- else if (IS_XEHPSDV(i915))
- xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
@@ -2177,30 +2057,6 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
}
}
-static void blacklist_trtt(struct intel_engine_cs *engine)
-{
- struct i915_wa_list *w = &engine->whitelist;
-
- /*
- * Prevent read/write access to [0x4400, 0x4600) which covers
- * the TRTT range across all engines. Note that normally userspace
- * cannot access the other engines' trtt control, but for simplicity
- * we cover the entire range on each engine.
- */
- whitelist_reg_ext(w, _MMIO(0x4400),
- RING_FORCE_TO_NONPRIV_DENY |
- RING_FORCE_TO_NONPRIV_RANGE_64);
- whitelist_reg_ext(w, _MMIO(0x4500),
- RING_FORCE_TO_NONPRIV_DENY |
- RING_FORCE_TO_NONPRIV_RANGE_64);
-}
-
-static void pvc_whitelist_build(struct intel_engine_cs *engine)
-{
- /* Wa_16014440446:pvc */
- blacklist_trtt(engine);
-}
-
static void xelpg_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
@@ -2227,12 +2083,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
; /* none yet */
else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_whitelist_build(engine);
- else if (IS_PONTEVECCHIO(i915))
- pvc_whitelist_build(engine);
else if (IS_DG2(i915))
dg2_whitelist_build(engine);
- else if (IS_XEHPSDV(i915))
- ; /* none needed */
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 11)
@@ -2813,10 +2665,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
static void
ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
- if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
- /* Wa_14014999345:pvc */
- wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
- }
+ /* Placeholder: CCS engine-specific workarounds go here. */
}
/*
@@ -2849,10 +2698,32 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
THREAD_EX_ARB_MODE_RR_AFTER_DEP);
- if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
+static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ struct intel_gt *gt = engine->gt;
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /*
+ * Wa_14019159160: This workaround, along with others, leads to
+ * significant challenges in utilizing load balancing among the
+ * CCS slices. Consequently, an architectural decision has been
+ * made to completely disable automatic CCS load balancing.
+ */
+ wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
+
+ /*
+ * After disabling automatic load balancing, we need to assign all
+ * slices to a single CCS. We call this CCS mode 1.
+ */
+ intel_gt_apply_ccs_mode(gt);
+}
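For reference, a rough sketch of how a "CCS mode 1" value might be computed; the XEHP_CCS_MODE_CSLICE() helper, XEHP_CCS_MODE_CSLICE_MASK, and I915_MAX_CCS names are assumptions for illustration, not a quote of intel_gt_apply_ccs_mode():

    static u32 example_ccs_mode(struct intel_gt *gt)
    {
    	int first_ccs = __ffs(CCS_MASK(gt));
    	u32 mode = 0;
    	int cslice;

    	/*
    	 * Point every present compute slice at the first available CCS
    	 * engine and mark fused-off slices as disabled, so all compute
    	 * work lands on a single CCS regardless of which slice runs it.
    	 */
    	for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
    		if (CCS_MASK(gt) & BIT(cslice))
    			mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
    		else
    			mode |= XEHP_CCS_MODE_CSLICE(cslice,
    						     XEHP_CCS_MODE_CSLICE_MASK);
    	}

    	return mode;
    }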
+
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
@@ -2891,10 +2762,14 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74)))
+ IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) {
/* Wa_14017856879 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
+ /* Wa_14020495402 */
+ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, XELPG_DISABLE_TDL_SVHS_GATING);
+ }
+
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
/*
@@ -2922,21 +2797,15 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
- IS_PONTEVECCHIO(i915) ||
IS_DG2(i915)) {
/* Wa_22014226127 */
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
}
- if (IS_PONTEVECCHIO(i915) || IS_DG2(i915)) {
+ if (IS_DG2(i915)) {
/* Wa_14015227452:dg2,pvc */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
- /* Wa_16015675438:dg2,pvc */
- wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
- }
-
- if (IS_DG2(i915)) {
/*
* Wa_16011620976:dg2_g11
* Wa_22015475538:dg2
@@ -2972,22 +2841,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
0 /* write-only, so skip validation */,
true);
}
-
- if (IS_XEHPSDV(i915)) {
- /* Wa_1409954639 */
- wa_mcr_masked_en(wal,
- GEN8_ROW_CHICKEN,
- SYSTOLIC_DOP_CLOCK_GATING_DIS);
-
- /* Wa_1607196519 */
- wa_mcr_masked_en(wal,
- GEN9_ROW_CHICKEN4,
- GEN12_DISABLE_GRF_CLEAR);
-
- /* Wa_14010449647:xehpsdv */
- wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
- }
}
static void
@@ -3003,8 +2856,10 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
* to a single RCS/CCS engine's workaround list since
* they're reset as part of the general render domain reset.
*/
- if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
general_render_compute_wa_init(engine, wal);
+ ccs_engine_wa_mode(engine, wal);
+ }
if (engine->class == COMPUTE_CLASS)
ccs_engine_wa_init(engine, wal);
@@ -3068,7 +2923,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
const struct i915_range *mcr_ranges;
int i;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
mcr_ranges = mcr_ranges_xehp;
else if (GRAPHICS_VER(i915) >= 12)
mcr_ranges = mcr_ranges_gen12;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 0dd4d00ee894..9ce8ff1c04fe 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -319,7 +319,7 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_add(rq);
timeout = 0;
- intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
+ intel_wedge_on_timeout(&w, gt, HZ / 5 /* 200ms */)
timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
if (intel_gt_is_wedged(gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index f40de408cd3a..2cfc23c58e90 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -281,7 +281,7 @@ static int igt_atomic_reset(void *arg)
awake = reset_prepare(gt);
p->critical_section_begin();
- err = __intel_gt_reset(gt, ALL_ENGINES);
+ err = intel_gt_reset_all_engines(gt);
p->critical_section_end();
reset_finish(gt, awake);
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 302d0540295d..4ecc4ae74a54 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -53,7 +53,7 @@ static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
static int slpc_set_freq(struct intel_gt *gt, u32 freq)
{
int err;
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
err = slpc_set_max_freq(slpc, freq);
if (err) {
@@ -182,7 +182,7 @@ static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine)
{
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
struct {
u64 power;
int freq;
@@ -262,7 +262,7 @@ static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
static int run_test(struct intel_gt *gt, int test_type)
{
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index 811add10c30d..c34674e797c6 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -207,6 +207,27 @@ struct slpc_shared_data {
u8 reserved_mode_definition[4096];
} __packed;
+struct slpc_context_frequency_request {
+ u32 frequency_request:16;
+ u32 reserved:12;
+ u32 is_compute:1;
+ u32 ignore_busyness:1;
+ u32 is_minimum:1;
+ u32 is_predefined:1;
+} __packed;
+
+#define SLPC_CTX_FREQ_REQ_IS_COMPUTE REG_BIT(28)
+
+struct slpc_optimized_strategies {
+ u32 compute:1;
+ u32 async_flip:1;
+ u32 media:1;
+ u32 vsync_flip:1;
+ u32 reserved:28;
+} __packed;
+
+#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+
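Note how the bitfield layout above lines up with the REG_BIT() definition: is_compute sits after the 16 frequency bits and 12 reserved bits, i.e. at bit 28, which is exactly what SLPC_CTX_FREQ_REQ_IS_COMPUTE encodes. A standalone packing sketch (illustrative only; the frequency units are an assumption):

    #include <stdint.h>

    /* Field layout from slpc_context_frequency_request above. */
    static inline uint32_t pack_ctx_freq_req(uint32_t freq, int is_compute)
    {
    	/* frequency_request is bits 15:0; is_compute is bit 16 + 12 = 28,
    	 * matching SLPC_CTX_FREQ_REQ_IS_COMPUTE (REG_BIT(28)). */
    	return (freq & 0xffffu) | ((uint32_t)!!is_compute << 28);
    }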
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index dabeaf4f245f..00d6402333f8 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -36,6 +36,7 @@ enum intel_guc_load_status {
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR = 0x75,
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
INTEL_GUC_LOAD_STATUS_READY = 0xF0,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 58012edd4eb0..bebf28e3c479 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -101,4 +101,11 @@ enum {
GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5,
};
+/*
+ * Workaround keys:
+ */
+enum {
+ GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE = 0x9001,
+};
+
#endif /* _ABI_GUC_KLVS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index e2e42b3e0d5d..3b69bc6616bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -298,7 +298,7 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
- intel_guc_write_barrier(&gt->uc.guc);
+ intel_guc_write_barrier(gt_to_guc(gt));
i915_gem_object_unpin_map(gsc->fw.obj);
@@ -351,7 +351,7 @@ static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt), GSC_VER_PKT_SZ * 2,
&vma, &vaddr);
if (err) {
gt_err(gt, "failed to allocate vma for GSC version query\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index 40817ebcca71..a7d5465655f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -358,7 +358,8 @@ static int proxy_channel_alloc(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_PROXY_CHANNEL_SIZE,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt),
+ GSC_PROXY_CHANNEL_SIZE,
&vma, &vaddr);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2b450c43bbd7..5e60a34692af 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -286,7 +286,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
/* Wa_22012773006:gen11,gen12 < XeHP */
if (GRAPHICS_VER(gt->i915) >= 11 &&
- GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55))
flags |= GUC_WA_POLLCS;
/* Wa_14014475959 */
@@ -294,6 +294,11 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
IS_DG2(gt->i915))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ flags |= GUC_WA_RCS_CCS_SWITCHOUT;
+
/*
* Wa_14012197797
* Wa_22011391025
@@ -315,15 +320,12 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (IS_DG2_G11(gt->i915))
flags |= GUC_WA_CONTEXT_ISOLATION;
- /* Wa_16015675438 */
- if (!RCS_MASK(gt))
- flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
-
- /* Wa_14018913170 */
- if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) {
- if (IS_DG2(gt->i915) || IS_METEORLAKE(gt->i915) || IS_PONTEVECCHIO(gt->i915))
- flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
- }
+ /*
+ * Wa_14018913170: Applicable to all platforms supported by i915 so
+ * don't bother testing for all X/Y/Z platforms explicitly.
+ */
+ if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
return flags;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index be70c46604b4..57b903132776 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -204,6 +204,8 @@ struct intel_guc {
struct guc_mmio_reg *ads_regset;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @ads_waklv_size: size of workaround KLVs */
+ u32 ads_waklv_size;
/** @ads_capture_size: size of register lists in the ADS used for error capture */
u32 ads_capture_size;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index f7372f736a77..c606bb5e3b7b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -46,6 +46,10 @@
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
+ * | w/a KLVs |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
* | capture lists |
* +---------------------------------------+
* | padding |
@@ -88,6 +92,11 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}
+static u32 guc_ads_waklv_size(struct intel_guc *guc)
+{
+ return PAGE_ALIGN(guc->ads_waklv_size);
+}
+
static u32 guc_ads_capture_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->ads_capture_size);
@@ -113,7 +122,7 @@ static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
-static u32 guc_ads_capture_offset(struct intel_guc *guc)
+static u32 guc_ads_waklv_offset(struct intel_guc *guc)
{
u32 offset;
@@ -123,6 +132,16 @@ static u32 guc_ads_capture_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
+static u32 guc_ads_capture_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_waklv_offset(guc) +
+ guc_ads_waklv_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
static u32 guc_ads_private_data_offset(struct intel_guc *guc)
{
u32 offset;
@@ -393,7 +412,7 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
/* add in local MOCS registers */
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
else
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
@@ -503,7 +522,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
-#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55) ? \
XEHP_LR_HW_CONTEXT_SIZE : \
LR_HW_CONTEXT_SIZE)
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
@@ -796,6 +815,65 @@ engine_instance_list:
return PAGE_ALIGN(total_size);
}
+/* Wa_14019159160 */
+static u32 guc_waklv_ra_mode(struct intel_guc *guc, u32 offset, u32 remain)
+{
+ u32 size;
+ u32 klv_entry[] = {
+ /* 16:16 key/length */
+ FIELD_PREP(GUC_KLV_0_KEY, GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE) |
+ FIELD_PREP(GUC_KLV_0_LEN, 0),
+ /* 0 dwords data */
+ };
+
+ size = sizeof(klv_entry);
+ GEM_BUG_ON(remain < size);
+
+ iosys_map_memcpy_to(&guc->ads_map, offset, klv_entry, size);
+
+ return size;
+}
+
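The RA-mode entry above is the degenerate zero-length case of the 16:16 key/length KLV format. As a sketch only, a generic emitter with a payload might look like the following; guc_waklv_write() and its parameters are hypothetical, not an existing helper:

    static u32 guc_waklv_write(struct intel_guc *guc, u32 offset, u32 remain,
    			   u32 key, const u32 *data, u32 num_dwords)
    {
    	u32 size = (1 + num_dwords) * sizeof(u32);
    	u32 header = FIELD_PREP(GUC_KLV_0_KEY, key) |
    		     FIELD_PREP(GUC_KLV_0_LEN, num_dwords);

    	/* One header dword followed by num_dwords of payload. */
    	GEM_BUG_ON(remain < size);
    	iosys_map_memcpy_to(&guc->ads_map, offset, &header, sizeof(header));
    	iosys_map_memcpy_to(&guc->ads_map, offset + sizeof(header),
    			    data, num_dwords * sizeof(u32));

    	return size;
    }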
+static void guc_waklv_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 offset, addr_ggtt, remain, size;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return;
+
+ if (GUC_FIRMWARE_VER(guc) < MAKE_GUC_VER(70, 10, 0))
+ return;
+
+ GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));
+ offset = guc_ads_waklv_offset(guc);
+ remain = guc_ads_waklv_size(guc);
+
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ size = guc_waklv_ra_mode(guc, offset, remain);
+ offset += size;
+ remain -= size;
+ }
+
+ size = guc_ads_waklv_size(guc) - remain;
+ if (!size)
+ return;
+
+ offset = guc_ads_waklv_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+ ads_blob_write(guc, ads.wa_klv_addr_lo, addr_ggtt);
+ ads_blob_write(guc, ads.wa_klv_addr_hi, 0);
+ ads_blob_write(guc, ads.wa_klv_size, size);
+}
+
+static int guc_prep_waklv(struct intel_guc *guc)
+{
+ /* Fudge something chunky for now: */
+ return PAGE_SIZE;
+}
+
static void __guc_ads_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -843,6 +921,9 @@ static void __guc_ads_init(struct intel_guc *guc)
/* MMIO save/restore list */
guc_mmio_reg_state_init(guc);
+ /* Workaround KLV list */
+ guc_waklv_init(guc);
+
/* Private Data */
ads_blob_write(guc, ads.private_data, base +
guc_ads_private_data_offset(guc));
@@ -886,6 +967,12 @@ int intel_guc_ads_create(struct intel_guc *guc)
return ret;
guc->ads_capture_size = ret;
+ /* And don't forget the workaround KLVs: */
+ ret = guc_prep_waklv(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_waklv_size = ret;
+
/* Now the total size can be determined: */
size = guc_ads_blob_size(guc);
@@ -961,7 +1048,7 @@ u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u8 guc_class = engine_class_to_guc_class(engine->class);
size_t offset = offsetof(struct __guc_ads_blob,
engine_usage.engines[guc_class][ilog2(engine->logical_mask)]);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index a1cd40d80517..9547fff672bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -51,6 +51,7 @@
{ RING_ESR(0), 0, 0, "ESR" }, \
{ RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
{ RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
+ { RING_EIR(0), 0, 0, "EIR" }, \
{ RING_IPEIR(0), 0, 0, "IPEIR" }, \
{ RING_IPEHR(0), 0, 0, "IPEHR" }, \
{ RING_INSTPS(0), 0, 0, "INSTPS" }, \
@@ -80,9 +81,6 @@
{ GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
{ GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }
-#define COMMON_BASE_HAS_EU \
- { EIR, 0, 0, "EIR" }
-
#define COMMON_BASE_RENDER \
{ GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }
@@ -105,7 +103,6 @@ static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
/* XE_LP Render / Compute Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
COMMON_GEN12BASE_RENDER,
};
@@ -148,7 +145,6 @@ static const struct __guc_mmio_reg_descr gen8_global_regs[] = {
};
static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
};
@@ -1441,7 +1437,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
if (!cap || !ee->engine)
return -ENODEV;
- guc = &ee->engine->gt->uc.guc;
+ guc = gt_to_guc(ee->engine->gt);
i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
ee->engine->name);
@@ -1543,7 +1539,7 @@ bool intel_guc_capture_is_matching_engine(struct intel_gt *gt,
if (!gt || !ce || !engine)
return false;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return false;
@@ -1573,7 +1569,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
if (!gt || !ee || !ce)
return;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 52332bb14339..23f54c84cbab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -26,7 +26,7 @@ static void guc_prepare_xfer(struct intel_gt *gt)
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING;
- if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 55))
shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
GUC_ENABLE_MIA_CACHING;
@@ -115,6 +115,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool
case INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
*success = false;
return true;
}
@@ -241,6 +242,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
ret = -EPERM;
break;
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ guc_info(guc, "invalid w/a KLV entry\n");
+ ret = -EINVAL;
+ break;
+
case INTEL_GUC_LOAD_STATUS_HWCONFIG_START:
guc_info(guc, "still extracting hwconfig table.\n");
ret = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 8ae1846431da..14797e80bc92 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -96,8 +96,9 @@
#define GUC_WA_GAM_CREDITS BIT(10)
#define GUC_WA_DUAL_QUEUE BIT(11)
#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13)
-#define GUC_WA_CONTEXT_ISOLATION BIT(15)
#define GUC_WA_PRE_PARSER BIT(14)
+#define GUC_WA_CONTEXT_ISOLATION BIT(15)
+#define GUC_WA_RCS_CCS_SWITCHOUT BIT(16)
#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
#define GUC_WA_POLLCS BIT(18)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
@@ -430,7 +431,10 @@ struct guc_ads {
u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
- u32 reserved[14];
+ u32 wa_klv_addr_lo;
+ u32 wa_klv_addr_hi;
+ u32 wa_klv_size;
+ u32 reserved[11];
} __packed;
/* Engine usage stats */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index cc9569af7f0c..b67a15f74276 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -111,7 +111,7 @@ static bool has_table(struct drm_i915_private *i915)
static int guc_hwconfig_init(struct intel_gt *gt)
{
struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!has_table(gt->i915))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 3e681ab6fbf9..706fffca698b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -537,6 +537,20 @@ int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
return ret;
}
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_STRATEGIES,
+ val);
+
+ return ret;
+}
+
int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -711,6 +725,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+ /* Enable SLPC Optimized Strategy for compute */
+ intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 6ac6503c39d4..1cb5fd44f05c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -45,5 +45,6 @@ void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f3dcae4b9d45..0eaa1064242c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -398,7 +398,7 @@ static inline void set_context_guc_id_invalid(struct intel_context *ce)
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
{
- return &ce->engine->gt->uc.guc;
+ return gt_to_guc(ce->engine->gt);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
@@ -1246,7 +1246,7 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine,
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
@@ -1311,7 +1311,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
struct intel_gt *gt = engine->gt;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
u64 total, gt_stamp_saved;
unsigned long flags;
u32 reset_count;
@@ -1403,14 +1403,17 @@ static void guc_cancel_busyness_worker(struct intel_guc *guc)
* Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
* every possible call stack is unfeasible. It would be too intrusive to many
* areas that really don't care about the GuC backend. However, there is the
- * 'reset_in_progress' flag available, so just use that.
+ * I915_RESET_BACKOFF flag, and the gt->reset.mutex can be tested with
+ * mutex_is_locked(). So just use those. Note that testing both is required
+ * due to the hideously complex nature of the i915 driver's reset code paths.
*
* And note that in the case of a reset occurring during driver unload
- * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set
- * is fine because there is another cancel in _finish (when the reset flag is
- * not).
+ * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the
+ * reset flag/mutex are set) is fine because there is another explicit cancel in
+ * intel_guc_submission_fini (when the reset flag/mutex are not).
*/
- if (guc_to_gt(guc)->uc.reset_in_progress)
+ if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
+ test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
cancel_delayed_work(&guc->timestamp.work);
else
cancel_delayed_work_sync(&guc->timestamp.work);
@@ -1424,8 +1427,6 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- guc_cancel_busyness_worker(guc);
-
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
@@ -1576,7 +1577,7 @@ static void guc_fini_engine_stats(struct intel_guc *guc)
void intel_guc_busyness_park(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
if (!guc_submission_initialized(guc))
return;
@@ -1603,7 +1604,7 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
unsigned long flags;
ktime_t unused;
@@ -2004,13 +2005,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
- /*
- * Ensure the busyness worker gets cancelled even on a fatal wedge.
- * Note that reset_prepare is not allowed to because it confuses lockdep.
- */
- if (guc_submission_initialized(guc))
- guc_cancel_busyness_worker(guc);
-
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
!intel_guc_is_fw_running(guc) ||
@@ -2136,6 +2130,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
if (!guc->submission_initialized)
return;
+ guc_fini_engine_stats(guc);
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
@@ -2194,7 +2189,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
static void guc_submit_request(struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
- struct intel_guc *guc = &rq->engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(rq->engine->gt);
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
@@ -2220,11 +2215,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
order_base_2(ce->parallel.number_children
+ 1));
else
- ret = ida_simple_get(&guc->submission_state.guc_ids,
- NUMBER_MULTI_LRC_GUC_ID(guc),
- guc->submission_state.num_guc_ids,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL |
- __GFP_NOWARN);
+ ret = ida_alloc_range(&guc->submission_state.guc_ids,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ guc->submission_state.num_guc_ids - 1,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(ret < 0))
return ret;
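The 'num_guc_ids - 1' above is not incidental: ida_simple_get() took an exclusive 'end' bound, while ida_alloc_range() takes an inclusive 'max'. A minimal sketch of the equivalence:

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    /*
     * Both the old and new call hand out ids in [0, 127]:
     *   old: ida_simple_get(&example_ida, 0, 128, GFP_KERNEL);
     *   new: ida_alloc_range(&example_ida, 0, 128 - 1, GFP_KERNEL);
     */
    static int example_alloc(void)
    {
    	return ida_alloc_range(&example_ida, 0, 128 - 1, GFP_KERNEL);
    }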
@@ -2247,8 +2241,8 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
+ 1));
} else {
--guc->submission_state.guc_ids_in_use;
- ida_simple_remove(&guc->submission_state.guc_ids,
- ce->guc_id.id);
+ ida_free(&guc->submission_state.guc_ids,
+ ce->guc_id.id);
}
clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
@@ -2645,6 +2639,7 @@ MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
+MAKE_CONTEXT_POLICY_ADD(slpc_ctx_freq_req, SLPM_GT_FREQUENCY)
#undef MAKE_CONTEXT_POLICY_ADD
@@ -2660,10 +2655,11 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct context_policy policy;
u32 execution_quantum;
u32 preemption_timeout;
+ u32 slpc_ctx_freq_req = 0;
unsigned long flags;
int ret;
@@ -2675,11 +2671,15 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
execution_quantum = engine->props.timeslice_duration_ms * 1000;
preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ if (ce->flags & BIT(CONTEXT_LOW_LATENCY))
+ slpc_ctx_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
+
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
__guc_context_policy_add_execution_quantum(&policy, execution_quantum);
__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_policy_add_slpc_ctx_freq_req(&policy, slpc_ctx_freq_req);
if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
__guc_context_policy_add_preempt_to_idle(&policy, 1);
@@ -2736,7 +2736,7 @@ static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
static void prepare_context_registration_info_v69(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
struct guc_lrc_desc_v69 *desc;
struct intel_context *child;
@@ -2805,7 +2805,7 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
GEM_BUG_ON(!engine->mask);
@@ -2868,7 +2868,7 @@ static int try_context_registration(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
intel_wakeref_t wakeref;
u32 ctx_id = ce->guc_id.id;
bool context_registered;
@@ -4496,7 +4496,13 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
if (engine->class == COMPUTE_CLASS)
if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_DG2(engine->i915))
- engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
+
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
@@ -4507,7 +4513,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
*/
engine->emit_bb_start = gen8_emit_bb_start;
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
engine->emit_bb_start = xehp_emit_bb_start;
}
@@ -4549,7 +4555,7 @@ static void guc_sched_engine_destroy(struct kref *kref)
int intel_guc_submission_setup(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
/*
* The setup relies on several assumptions (e.g. irqs always enabled)
@@ -5308,7 +5314,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
void intel_guc_find_hung_context(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
struct i915_request *rq;
unsigned long index;
@@ -5370,7 +5376,7 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
unsigned long index;
unsigned long flags;
@@ -5822,7 +5828,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
if (!ve)
return ERR_PTR(-ENOMEM);
- guc = &siblings[0]->gt->uc.guc;
+ guc = gt_to_guc(siblings[0]->gt);
ve->base.i915 = siblings[0]->i915;
ve->base.gt = siblings[0]->gt;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 0945b177d5f9..2d9152eb7282 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -385,7 +385,7 @@ int intel_huc_init(struct intel_huc *huc)
if (HAS_ENGINE(gt, GSC0)) {
struct i915_vma *vma;
- vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PXP43_HUC_AUTH_INOUT_SIZE * 2);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
huc_info(huc, "Failed to allocate heci pkt\n");
@@ -540,7 +540,7 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!intel_uc_fw_is_loaded(&huc->fw))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 6dfe5d9456c6..7a63abf8f644 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -50,10 +50,6 @@ static void uc_expand_default_options(struct intel_uc *uc)
/* Default: enable HuC authentication and GuC submission */
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
-
- /* XEHPSDV and PVC do not use HuC */
- if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
- i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}
/* Reset GuC providing us with fresh state for both GuC and HuC.
@@ -637,6 +633,10 @@ void intel_uc_reset_finish(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
+ /*
+ * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
+ * So this function is sometimes called with the in-progress flag not set.
+ */
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 756093eaf2ad..d80278eb45d7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -807,7 +807,7 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
struct intel_uc_fw_file *huc_selected)
{
- struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
+ struct intel_uc_fw_file *guc_selected = &gt_to_guc(gt)->fw.file_selected;
struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
bool new_huc, new_guc;
@@ -1209,7 +1209,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
* since its GGTT offset will be GuC accessible.
*/
GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
- vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PAGE_SIZE);
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index c900aac85adb..68feb55654f7 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -144,7 +144,7 @@ err:
static int intel_guc_steal_guc_ids(void *arg)
{
struct intel_gt *gt = arg;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret, sv, context_index = 0;
intel_wakeref_t wakeref;
struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d4a3f3e093b0..4be8cb65fb7e 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -50,6 +50,7 @@
#include "trace.h"
#include "display/intel_display.h"
+#include "display/intel_sprite_regs.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e0c5dfb788eb..2b7df7fcf369 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -36,8 +36,10 @@
#include "i915_reg.h"
#include "gvt.h"
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_display.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_sprite_regs.h"
static int get_edp_pipe(struct intel_vgpu *vgpu)
{
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 313efdabee57..4140da68aabb 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -34,11 +34,14 @@
*/
#include <uapi/drm/drm_fourcc.h>
-#include "i915_drv.h"
+
#include "gvt.h"
+#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
+#include "display/intel_sprite_regs.h"
+
#define PRIMARY_FORMAT_NUM 16
struct pixel_format {
int drm_format; /* Pixel format in DRM definition */
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index efcb00472be2..102eb354fed6 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -41,6 +41,7 @@
#include "gvt.h"
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
@@ -49,6 +50,7 @@
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sprite_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
@@ -2763,15 +2765,15 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+ MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+ MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+ MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
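The three hunks above swap the fixed-lane BXT_PORT_TX_DW3_LN0(phy, ch) macro for a lane-parameterized BXT_PORT_TX_DW3_LN(phy, ch, lane), passing lane 0 explicitly. A sketch of the assumed macro shape, composed from the per-lane offset helper that the i915_reg.h removals further below still show:

	#define _BXT_LANE_OFFSET(lane)	(((lane) >> 1) * 0x200 + \
					 ((lane) & 1) * 0x80)
	#define BXT_PORT_TX_DW3_LN(phy, ch, lane) \
		_MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW3_LN0_B, \
				  _PORT_TX_DW3_LN0_C) + \
		      _BXT_LANE_OFFSET(lane))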
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 5b5def6ddef7..922711e0e30b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -37,6 +37,7 @@
#include "i915_reg.h"
#include "gvt.h"
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_dpio_phy.h"
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 990eaa029d9c..bc717cf544e4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
#include <linux/sort.h>
#include <linux/string_helpers.h>
+#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include "display/intel_display_params.h"
@@ -156,18 +157,6 @@ static const char *i915_cache_level_str(struct drm_i915_gem_object *obj)
case 4: return " WB (2-Way Coh)";
default: return " not defined";
}
- } else if (IS_PONTEVECCHIO(i915)) {
- switch (obj->pat_index) {
- case 0: return " UC";
- case 1: return " WC";
- case 2: return " WT";
- case 3: return " WB";
- case 4: return " WT (CLOS1)";
- case 5: return " WB (CLOS1)";
- case 6: return " WT (CLOS2)";
- case 7: return " WT (CLOS2)";
- default: return " not defined";
- }
} else if (GRAPHICS_VER(i915) >= 12) {
switch (obj->pat_index) {
case 0: return " WB";
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 8bca02025e09..33d2dcb0de65 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -4,6 +4,7 @@
*/
#include <linux/kernel.h>
+#include <linux/debugfs.h>
#include "i915_debugfs_params.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 9ee902d5b72c..161b21eff694 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -202,7 +202,7 @@ static void sanitize_gpu(struct drm_i915_private *i915)
unsigned int i;
for_each_gt(gt, i915, i)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
}
}
@@ -800,7 +800,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_cleanup_modeset2;
ret = intel_pxp_init(i915);
- if (ret != -ENODEV)
+ if (ret && ret != -ENODEV)
drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
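The one-character change above fixes the success path: intel_pxp_init() returns 0 on success, so the old `ret != -ENODEV` test logged "pxp init failed with 0" even when nothing failed. The intended predicate, spelled out:

	ret = intel_pxp_init(i915);
	if (ret && ret != -ENODEV)	/* 0 = success, -ENODEV = no PXP */
		drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);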
@@ -920,27 +920,6 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-/**
- * i915_driver_lastclose - clean up after all DRM clients have exited
- * @dev: DRM device
- *
- * Take care of cleaning up after all DRM clients have exited. In the
- * mode setting case, we want to restore the kernel's initial mode (just
- * in case the last client left us in a bad state).
- *
- * Additionally, in the non-mode setting case, we'll tear down the GTT
- * and DMA structures, since the kernel won't be using them, and clean
- * up any GEM state.
- */
-static void i915_driver_lastclose(struct drm_device *dev)
-{
- struct drm_i915_private *i915 = to_i915(dev);
-
- intel_fbdev_restore_mode(i915);
-
- vga_switcheroo_process_delayed_switch();
-}
-
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1831,7 +1810,6 @@ static const struct drm_driver i915_drm_driver = {
DRIVER_SYNCOBJ_TIMELINE,
.release = i915_driver_release,
.open = i915_driver_open,
- .lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
.show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e81b3b2858ac..ee0d7d5f135d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -235,25 +235,17 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
- bool display_irqs_enabled;
-
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;
/** Cached value of IMR to avoid reads in updating the bitfield */
- union {
- u32 irq_mask;
- u32 de_irq_mask[I915_MAX_PIPES];
- };
- u32 pipestat_irq_mask[I915_MAX_PIPES];
+ u32 irq_mask;
bool preserve_bios_swizzle;
unsigned int fsb_freq, mem_freq, is_ddr3;
- unsigned int skl_preferred_vco_freq;
- unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
@@ -350,9 +342,6 @@ struct drm_i915_private {
struct intel_pxp *pxp;
- /* For i915gm/i945gm vblank irq workaround */
- u8 vblank_enabled;
-
bool irq_enabled;
struct i915_pmu pmu;
@@ -544,9 +533,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_DG1(i915) IS_PLATFORM(i915, INTEL_DG1)
#define IS_ALDERLAKE_S(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P)
-#define IS_XEHPSDV(i915) IS_PLATFORM(i915, INTEL_XEHPSDV)
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
-#define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
#define IS_LUNARLAKE(i915) 0
@@ -621,17 +608,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TIGERLAKE_UY(i915) \
IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
-#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
- (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
-
-#define IS_PVC_BD_STEP(__i915, since, until) \
- (IS_PONTEVECCHIO(__i915) && \
- IS_BASEDIE_STEP(__i915, since, until))
-
-#define IS_PVC_CT_STEP(__i915, since, until) \
- (IS_PONTEVECCHIO(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
#define IS_LP(i915) (INTEL_INFO(i915)->is_lp)
#define IS_GEN9_LP(i915) (GRAPHICS_VER(i915) == 9 && IS_LP(i915))
#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_LP(i915))
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 5c3fec63cb4c..a62405787e77 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -155,12 +155,18 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
*/
value = 1;
break;
+ case I915_PARAM_HAS_CONTEXT_FREQ_HINT:
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ value = 1;
+ else
+ value = -EINVAL;
+ break;
case I915_PARAM_HAS_CONTEXT_ISOLATION:
value = intel_engines_has_context_isolation(i915);
break;
case I915_PARAM_SLICE_MASK:
/* Not supported from Xe_HP onward; use topology queries */
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return -EINVAL;
value = sseu->slice_mask;
@@ -169,7 +175,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_PARAM_SUBSLICE_MASK:
/* Not supported from Xe_HP onward; use topology queries */
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return -EINVAL;
/* Only copy bits from the first slice */
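The new I915_PARAM_HAS_CONTEXT_FREQ_HINT reports 1 when GuC submission is in use and stores -EINVAL otherwise, so userspace should test for a positive value. A hedged userspace sketch (minimal error handling, header names as in libdrm/uapi):

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int has_context_freq_hint(int drm_fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_CONTEXT_FREQ_HINT,
			.value = &value,
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;	/* older kernel: param unknown */
		return value > 0;	/* -EINVAL means not supported */
	}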
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a0b784ebaddd..625b3c024540 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -28,6 +28,7 @@
*/
#include <linux/ascii85.h>
+#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
@@ -1245,8 +1246,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA)
ee->fault_reg = intel_uncore_read(engine->uncore,
XELPMP_RING_FAULT_REG);
-
- else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
XEHP_RING_FAULT_REG);
else if (GRAPHICS_VER(i915) >= 12)
@@ -1852,7 +1852,7 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
if (GRAPHICS_VER(i915) == 7)
gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
XEHP_FAULT_TLB_DATA0);
gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 8c3f443c8347..49db3e09826c 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
struct intel_uncore *uncore = ddat->uncore;
intel_wakeref_t wakeref;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
intel_uncore_rmw(uncore, reg, clear, set);
- mutex_unlock(&hwmon->hwmon_lock);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
/*
@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
else
rgaddr = hwmon->rg.energy_status_all;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
reg_val = intel_uncore_read(uncore, rgaddr);
- if (reg_val >= ei->reg_val_prev)
- ei->accum_energy += reg_val - ei->reg_val_prev;
- else
- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
- ei->reg_val_prev = reg_val;
+ if (reg_val >= ei->reg_val_prev)
+ ei->accum_energy += reg_val - ei->reg_val_prev;
+ else
+ ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+ ei->reg_val_prev = reg_val;
- *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
- hwmon->scl_shift_energy);
- mutex_unlock(&hwmon->hwmon_lock);
+ *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+ hwmon->scl_shift_energy);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
static ssize_t
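hwm_energy() above extends a free-running 32-bit hardware energy counter to 64 bits by accumulating deltas, with an explicit branch for a single wrap between reads. The same pattern in isolation (illustrative names, standalone C):

	#include <limits.h>
	#include <stdint.h>

	static uint64_t accum_energy;	/* monotonic, 64-bit */
	static uint32_t reg_val_prev;	/* last raw counter sample */

	static void accumulate(uint32_t reg_val)
	{
		if (reg_val >= reg_val_prev)
			accum_energy += reg_val - reg_val_prev;
		else	/* counter wrapped since the last read */
			accum_energy += UINT_MAX - reg_val_prev + reg_val;
		reg_val_prev = reg_val;
	}

This stays accurate only if the counter is sampled at least once per wrap period.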
@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
/* Block waiting for GuC reset to complete when needed */
for (;;) {
+ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
mutex_lock(&hwmon->hwmon_lock);
prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
}
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
schedule();
}
finish_wait(&ddat->waitq, &wait);
if (ret)
- goto unlock;
-
- wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+ goto exit;
/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
if (val == PL1_DISABLE) {
@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
exit:
- intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
-unlock:
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
return ret;
}
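Both hwmon hunks above impose a single lock order: take the runtime-PM wakeref first, then hwmon_lock, releasing in reverse. The pattern in sketch form (assumed rationale: runtime resume runs its own locking, so it must not happen inside the hwmon_lock critical section):

	wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
	mutex_lock(&hwmon->hwmon_lock);
	/* ... register access ... */
	mutex_unlock(&hwmon->hwmon_lock);
	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);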
@@ -738,12 +739,6 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
- } else if (IS_XEHPSDV(i915)) {
- hwmon->rg.pkg_power_sku_unit = GT0_PACKAGE_POWER_SKU_UNIT;
- hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
- hwmon->rg.pkg_rapl_limit = GT0_PACKAGE_RAPL_LIMIT;
- hwmon->rg.energy_status_all = GT0_PLATFORM_ENERGY_STATUS;
- hwmon->rg.energy_status_tile = GT0_PACKAGE_ENERGY_STATUS;
} else {
hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
@@ -792,7 +787,7 @@ void i915_hwmon_register(struct drm_i915_private *i915)
if (!IS_DGFX(i915))
return;
- hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+ hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
return;
@@ -818,14 +813,12 @@ void i915_hwmon_register(struct drm_i915_private *i915)
hwm_get_preregistration_info(i915);
/* hwmon_dev points to device hwmon<i> */
- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
- ddat,
- &hwm_chip_info,
- hwm_groups);
- if (IS_ERR(hwmon_dev)) {
- i915->hwmon = NULL;
- return;
- }
+ hwmon_dev = hwmon_device_register_with_info(dev, ddat->name,
+ ddat,
+ &hwm_chip_info,
+ hwm_groups);
+ if (IS_ERR(hwmon_dev))
+ goto err;
ddat->hwmon_dev = hwmon_dev;
@@ -838,16 +831,36 @@ void i915_hwmon_register(struct drm_i915_private *i915)
if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
continue;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat_gt->name,
- ddat_gt,
- &hwm_gt_chip_info,
- NULL);
+ hwmon_dev = hwmon_device_register_with_info(dev, ddat_gt->name,
+ ddat_gt,
+ &hwm_gt_chip_info,
+ NULL);
if (!IS_ERR(hwmon_dev))
ddat_gt->hwmon_dev = hwmon_dev;
}
+ return;
+err:
+ i915_hwmon_unregister(i915);
}
void i915_hwmon_unregister(struct drm_i915_private *i915)
{
- fetch_and_zero(&i915->hwmon);
+ struct i915_hwmon *hwmon = i915->hwmon;
+ struct intel_gt *gt;
+ int i;
+
+ if (!hwmon)
+ return;
+
+ for_each_gt(gt, i915, i)
+ if (hwmon->ddat_gt[i].hwmon_dev)
+ hwmon_device_unregister(hwmon->ddat_gt[i].hwmon_dev);
+
+ if (hwmon->ddat.hwmon_dev)
+ hwmon_device_unregister(hwmon->ddat.hwmon_dev);
+
+ mutex_destroy(&hwmon->hwmon_lock);
+
+ kfree(i915->hwmon);
+ i915->hwmon = NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8130f043693b..678d632ed043 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -702,7 +702,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -767,7 +767,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -784,7 +784,7 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -838,7 +838,7 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index ba82277254b7..cc41974cee74 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -25,6 +25,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/cpufeature.h>
+#include <linux/bug.h>
+#include <linux/build_bug.h>
#include <asm/fpu/api.h>
#include "i915_memcpy.h"
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index de43048543e8..8c00169e3ab7 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -108,9 +108,6 @@ i915_param_named_unsafe(guc_firmware_path, charp, 0400,
i915_param_named_unsafe(huc_firmware_path, charp, 0400,
"HuC firmware path to use instead of the default one");
-i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
- "DMC firmware path to use instead of the default one");
-
i915_param_named_unsafe(gsc_firmware_path, charp, 0400,
"GSC firmware path to use instead of the default one");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 1315d7fac850..2eb3f2115ff2 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -51,7 +51,6 @@ struct drm_printer;
param(int, guc_log_level, -1, 0400) \
param(char *, guc_firmware_path, NULL, 0400) \
param(char *, huc_firmware_path, NULL, 0400) \
- param(char *, dmc_firmware_path, NULL, 0400) \
param(char *, gsc_firmware_path, NULL, 0400) \
param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 8b4fdeabb12a..405ca17a990b 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -38,6 +38,9 @@
#include "i915_reg.h"
#include "intel_pci_config.h"
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for device info");
+
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
.__runtime.graphics.ip.ver = (x), \
@@ -59,14 +62,6 @@
[I915_CACHE_WT] = 2, \
}
-#define PVC_CACHELEVEL \
- .cachelevel_to_pat = { \
- [I915_CACHE_NONE] = 0, \
- [I915_CACHE_LLC] = 3, \
- [I915_CACHE_L3_LLC] = 3, \
- [I915_CACHE_WT] = 2, \
- }
-
#define MTL_CACHELEVEL \
.cachelevel_to_pat = { \
[I915_CACHE_NONE] = 2, \
@@ -705,8 +700,6 @@ static const struct intel_device_info adl_p_info = {
I915_GTT_PAGE_SIZE_2M
#define XE_HP_FEATURES \
- .__runtime.graphics.ip.ver = 12, \
- .__runtime.graphics.ip.rel = 50, \
XE_HP_PAGE_SIZES, \
TGL_CACHELEVEL, \
.dma_mask_size = 46, \
@@ -730,32 +723,12 @@ static const struct intel_device_info adl_p_info = {
.__runtime.ppgtt_size = 48, \
.__runtime.ppgtt_type = INTEL_PPGTT_FULL
-#define XE_HPM_FEATURES \
- .__runtime.media.ip.ver = 12, \
- .__runtime.media.ip.rel = 50
-
-__maybe_unused
-static const struct intel_device_info xehpsdv_info = {
- XE_HP_FEATURES,
- XE_HPM_FEATURES,
- DGFX_FEATURES,
- PLATFORM(INTEL_XEHPSDV),
- .has_64k_pages = 1,
- .has_media_ratio_mode = 1,
- .platform_engine_mask =
- BIT(RCS0) | BIT(BCS0) |
- BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
- BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
- BIT(VCS4) | BIT(VCS5) | BIT(VCS6) | BIT(VCS7) |
- BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
- .require_force_probe = 1,
-};
-
#define DG2_FEATURES \
XE_HP_FEATURES, \
- XE_HPM_FEATURES, \
DGFX_FEATURES, \
+ .__runtime.graphics.ip.ver = 12, \
.__runtime.graphics.ip.rel = 55, \
+ .__runtime.media.ip.ver = 12, \
.__runtime.media.ip.rel = 55, \
PLATFORM(INTEL_DG2), \
.has_64k_pages = 1, \
@@ -778,33 +751,6 @@ static const struct intel_device_info ats_m_info = {
.tuning_thread_rr_after_dep = 1,
};
-#define XE_HPC_FEATURES \
- XE_HP_FEATURES, \
- .dma_mask_size = 52, \
- .has_3d_pipeline = 0, \
- .has_guc_deprivilege = 1, \
- .has_l3_ccs_read = 1, \
- .has_mslice_steering = 0, \
- .has_one_eu_per_fuse_bit = 1
-
-__maybe_unused
-static const struct intel_device_info pvc_info = {
- XE_HPC_FEATURES,
- XE_HPM_FEATURES,
- DGFX_FEATURES,
- .__runtime.graphics.ip.rel = 60,
- .__runtime.media.ip.rel = 60,
- PLATFORM(INTEL_PONTEVECCHIO),
- .has_flat_ccs = 0,
- .max_pat_index = 7,
- .platform_engine_mask =
- BIT(BCS0) |
- BIT(VCS0) |
- BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
- .require_force_probe = 1,
- PVC_CACHELEVEL,
-};
-
static const struct intel_gt_definition xelpmp_extra_gt[] = {
{
.type = GT_MEDIA,
@@ -842,6 +788,8 @@ static const struct intel_device_info mtl_info = {
#undef PLATFORM
+__diag_pop();
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
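The __diag_push()/__diag_ignore_all("-Woverride-init", ...) pair added above exists because the device-info tables deliberately layer designated initializers: a shared features macro sets a field and a later platform entry overrides it, which GCC flags. A minimal illustration with a hypothetical struct:

	struct info { int ver, rel; };

	#define BASE_FEATURES	.ver = 12, .rel = 50

	static const struct info dg2 = {
		BASE_FEATURES,
		.rel = 55,	/* legal C; -Woverride-init warns here */
	};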
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index bd9d812b1afa..0b1cd4c7a525 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -292,7 +292,7 @@ static u32 i915_perf_stream_paranoid = true;
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
#define OAREPORT_REASON_CLK_RATIO (1<<5)
-#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
*
@@ -817,7 +817,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
*/
if (oa_report_ctx_invalid(stream, report) &&
- GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) {
+ GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 55)) {
ctx_id = INVALID_CTX_ID;
oa_context_id_squash(stream, report32);
}
@@ -1419,7 +1419,7 @@ static int gen12_get_render_context_id(struct i915_perf_stream *stream)
mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
(GEN12_GUC_SW_CTX_ID_SHIFT - 32);
- } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 55)) {
ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
(XEHP_SW_CTX_ID_SHIFT - 32);
@@ -2881,11 +2881,11 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
int ret;
/*
- * Wa_1508761755:xehpsdv, dg2
+ * Wa_1508761755
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
+ if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
@@ -2911,7 +2911,7 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
/*
* Initialize Super Queue Internal Cnt Register
* Set PMON Enable in order to collect valid metrics.
- * Enable byets per clock reporting in OA for XEHPSDV onward.
+ * Enable bytes per clock reporting in OA.
*/
sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
@@ -2971,10 +2971,9 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
u32 sqcnt1;
/*
- * Wa_1508761755:xehpsdv, dg2
- * Enable thread stall DOP gating and EU DOP gating.
+ * Wa_1508761755: Enable thread stall DOP gating and EU DOP gating.
*/
- if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
+ if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
@@ -4123,7 +4122,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
props->hold_preemption = !!value;
break;
case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
- if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 55)) {
drm_dbg(&perf->i915->drm,
"SSEU config not supported on gfx %x\n",
GRAPHICS_VER_FULL(perf->i915));
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3baa2f54a86e..14d9ec0ed777 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -105,7 +105,7 @@ static int query_geometry_subslices(struct drm_i915_private *i915,
struct intel_engine_cs *engine;
struct i915_engine_class_instance classinstance;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return -ENODEV;
classinstance = *((struct i915_engine_class_instance *)&query_item->flags);
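The recurring IP_VER(12, 50) -> IP_VER(12, 55) bumps in the files above follow from the XEHPSDV/PVC removal: DG2 and ATS-M at graphics IP 12.55 are now the oldest Xe_HP platforms, so the Xe_HP cutoff checks move up. IP_VER packs the version pair into one comparable integer; its assumed shape:

	#define IP_VER(ver, rel)	((ver) << 8 | (rel))

	/* e.g. IP_VER(12, 55) == 0xc37, so >= comparisons order
	 * releases within a version correctly. */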
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e00557e1a57f..e22a82a5ddd7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -195,367 +195,6 @@
#define DPIO_SFR_BYPASS (1 << 1)
#define DPIO_CMNRST (1 << 0)
-/*
- * Per pipe/PLL DPIO regs
- */
-#define _VLV_PLL_DW3_CH0 0x800c
-#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
-#define DPIO_POST_DIV_DAC 0
-#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
-#define DPIO_POST_DIV_LVDS1 2
-#define DPIO_POST_DIV_LVDS2 3
-#define DPIO_K_SHIFT (24) /* 4 bits */
-#define DPIO_P1_SHIFT (21) /* 3 bits */
-#define DPIO_P2_SHIFT (16) /* 5 bits */
-#define DPIO_N_SHIFT (12) /* 4 bits */
-#define DPIO_ENABLE_CALIBRATION (1 << 11)
-#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
-#define DPIO_M2DIV_MASK 0xff
-#define _VLV_PLL_DW3_CH1 0x802c
-#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
-
-#define _VLV_PLL_DW5_CH0 0x8014
-#define DPIO_REFSEL_OVERRIDE 27
-#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
-#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
-#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
-#define DPIO_PLL_REFCLK_SEL_MASK 3
-#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
-#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
-#define _VLV_PLL_DW5_CH1 0x8034
-#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
-
-#define _VLV_PLL_DW7_CH0 0x801c
-#define _VLV_PLL_DW7_CH1 0x803c
-#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
-
-#define _VLV_PLL_DW8_CH0 0x8040
-#define _VLV_PLL_DW8_CH1 0x8060
-#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
-
-#define VLV_PLL_DW9_BCAST 0xc044
-#define _VLV_PLL_DW9_CH0 0x8044
-#define _VLV_PLL_DW9_CH1 0x8064
-#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
-
-#define _VLV_PLL_DW10_CH0 0x8048
-#define _VLV_PLL_DW10_CH1 0x8068
-#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
-
-#define _VLV_PLL_DW11_CH0 0x804c
-#define _VLV_PLL_DW11_CH1 0x806c
-#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
-
-/* Spec for ref block start counts at DW10 */
-#define VLV_REF_DW13 0x80ac
-
-#define VLV_CMN_DW0 0x8100
-
-/*
- * Per DDI channel DPIO regs
- */
-
-#define _VLV_PCS_DW0_CH0 0x8200
-#define _VLV_PCS_DW0_CH1 0x8400
-#define DPIO_PCS_TX_LANE2_RESET (1 << 16)
-#define DPIO_PCS_TX_LANE1_RESET (1 << 7)
-#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1 << 3)
-#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
-
-#define _VLV_PCS01_DW0_CH0 0x200
-#define _VLV_PCS23_DW0_CH0 0x400
-#define _VLV_PCS01_DW0_CH1 0x2600
-#define _VLV_PCS23_DW0_CH1 0x2800
-#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
-#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
-
-#define _VLV_PCS_DW1_CH0 0x8204
-#define _VLV_PCS_DW1_CH1 0x8404
-#define CHV_PCS_REQ_SOFTRESET_EN (1 << 23)
-#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1 << 22)
-#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21)
-#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
-#define DPIO_PCS_CLK_SOFT_RESET (1 << 5)
-#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
-
-#define _VLV_PCS01_DW1_CH0 0x204
-#define _VLV_PCS23_DW1_CH0 0x404
-#define _VLV_PCS01_DW1_CH1 0x2604
-#define _VLV_PCS23_DW1_CH1 0x2804
-#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
-#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
-
-#define _VLV_PCS_DW8_CH0 0x8220
-#define _VLV_PCS_DW8_CH1 0x8420
-#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20)
-#define CHV_PCS_USEDCLKCHANNEL (1 << 21)
-#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
-
-#define _VLV_PCS01_DW8_CH0 0x0220
-#define _VLV_PCS23_DW8_CH0 0x0420
-#define _VLV_PCS01_DW8_CH1 0x2620
-#define _VLV_PCS23_DW8_CH1 0x2820
-#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
-#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
-
-#define _VLV_PCS_DW9_CH0 0x8224
-#define _VLV_PCS_DW9_CH1 0x8424
-#define DPIO_PCS_TX2MARGIN_MASK (0x7 << 13)
-#define DPIO_PCS_TX2MARGIN_000 (0 << 13)
-#define DPIO_PCS_TX2MARGIN_101 (1 << 13)
-#define DPIO_PCS_TX1MARGIN_MASK (0x7 << 10)
-#define DPIO_PCS_TX1MARGIN_000 (0 << 10)
-#define DPIO_PCS_TX1MARGIN_101 (1 << 10)
-#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
-
-#define _VLV_PCS01_DW9_CH0 0x224
-#define _VLV_PCS23_DW9_CH0 0x424
-#define _VLV_PCS01_DW9_CH1 0x2624
-#define _VLV_PCS23_DW9_CH1 0x2824
-#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
-#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
-
-#define _CHV_PCS_DW10_CH0 0x8228
-#define _CHV_PCS_DW10_CH1 0x8428
-#define DPIO_PCS_SWING_CALC_TX0_TX2 (1 << 30)
-#define DPIO_PCS_SWING_CALC_TX1_TX3 (1 << 31)
-#define DPIO_PCS_TX2DEEMP_MASK (0xf << 24)
-#define DPIO_PCS_TX2DEEMP_9P5 (0 << 24)
-#define DPIO_PCS_TX2DEEMP_6P0 (2 << 24)
-#define DPIO_PCS_TX1DEEMP_MASK (0xf << 16)
-#define DPIO_PCS_TX1DEEMP_9P5 (0 << 16)
-#define DPIO_PCS_TX1DEEMP_6P0 (2 << 16)
-#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
-
-#define _VLV_PCS01_DW10_CH0 0x0228
-#define _VLV_PCS23_DW10_CH0 0x0428
-#define _VLV_PCS01_DW10_CH1 0x2628
-#define _VLV_PCS23_DW10_CH1 0x2828
-#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1)
-#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1)
-
-#define _VLV_PCS_DW11_CH0 0x822c
-#define _VLV_PCS_DW11_CH1 0x842c
-#define DPIO_TX2_STAGGER_MASK(x) ((x) << 24)
-#define DPIO_LANEDESKEW_STRAP_OVRD (1 << 3)
-#define DPIO_LEFT_TXFIFO_RST_MASTER (1 << 1)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0)
-#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
-
-#define _VLV_PCS01_DW11_CH0 0x022c
-#define _VLV_PCS23_DW11_CH0 0x042c
-#define _VLV_PCS01_DW11_CH1 0x262c
-#define _VLV_PCS23_DW11_CH1 0x282c
-#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
-#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
-
-#define _VLV_PCS01_DW12_CH0 0x0230
-#define _VLV_PCS23_DW12_CH0 0x0430
-#define _VLV_PCS01_DW12_CH1 0x2630
-#define _VLV_PCS23_DW12_CH1 0x2830
-#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1)
-#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1)
-
-#define _VLV_PCS_DW12_CH0 0x8230
-#define _VLV_PCS_DW12_CH1 0x8430
-#define DPIO_TX2_STAGGER_MULT(x) ((x) << 20)
-#define DPIO_TX1_STAGGER_MULT(x) ((x) << 16)
-#define DPIO_TX1_STAGGER_MASK(x) ((x) << 8)
-#define DPIO_LANESTAGGER_STRAP_OVRD (1 << 6)
-#define DPIO_LANESTAGGER_STRAP(x) ((x) << 0)
-#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
-
-#define _VLV_PCS_DW14_CH0 0x8238
-#define _VLV_PCS_DW14_CH1 0x8438
-#define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
-
-#define _VLV_PCS_DW23_CH0 0x825c
-#define _VLV_PCS_DW23_CH1 0x845c
-#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
-
-#define _VLV_TX_DW2_CH0 0x8288
-#define _VLV_TX_DW2_CH1 0x8488
-#define DPIO_SWING_MARGIN000_SHIFT 16
-#define DPIO_SWING_MARGIN000_MASK (0xff << DPIO_SWING_MARGIN000_SHIFT)
-#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
-#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
-
-#define _VLV_TX_DW3_CH0 0x828c
-#define _VLV_TX_DW3_CH1 0x848c
-/* The following bit for CHV phy */
-#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1 << 27)
-#define DPIO_SWING_MARGIN101_SHIFT 16
-#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
-#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
-
-#define _VLV_TX_DW4_CH0 0x8290
-#define _VLV_TX_DW4_CH1 0x8490
-#define DPIO_SWING_DEEMPH9P5_SHIFT 24
-#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
-#define DPIO_SWING_DEEMPH6P0_SHIFT 16
-#define DPIO_SWING_DEEMPH6P0_MASK (0xff << DPIO_SWING_DEEMPH6P0_SHIFT)
-#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
-
-#define _VLV_TX3_DW4_CH0 0x690
-#define _VLV_TX3_DW4_CH1 0x2a90
-#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
-
-#define _VLV_TX_DW5_CH0 0x8294
-#define _VLV_TX_DW5_CH1 0x8494
-#define DPIO_TX_OCALINIT_EN (1 << 31)
-#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
-
-#define _VLV_TX_DW11_CH0 0x82ac
-#define _VLV_TX_DW11_CH1 0x84ac
-#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
-
-#define _VLV_TX_DW14_CH0 0x82b8
-#define _VLV_TX_DW14_CH1 0x84b8
-#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
-
-/* CHV dpPhy registers */
-#define _CHV_PLL_DW0_CH0 0x8000
-#define _CHV_PLL_DW0_CH1 0x8180
-#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1)
-
-#define _CHV_PLL_DW1_CH0 0x8004
-#define _CHV_PLL_DW1_CH1 0x8184
-#define DPIO_CHV_N_DIV_SHIFT 8
-#define DPIO_CHV_M1_DIV_BY_2 (0 << 0)
-#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1)
-
-#define _CHV_PLL_DW2_CH0 0x8008
-#define _CHV_PLL_DW2_CH1 0x8188
-#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1)
-
-#define _CHV_PLL_DW3_CH0 0x800c
-#define _CHV_PLL_DW3_CH1 0x818c
-#define DPIO_CHV_FRAC_DIV_EN (1 << 16)
-#define DPIO_CHV_FIRST_MOD (0 << 8)
-#define DPIO_CHV_SECOND_MOD (1 << 8)
-#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
-#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0)
-#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
-
-#define _CHV_PLL_DW6_CH0 0x8018
-#define _CHV_PLL_DW6_CH1 0x8198
-#define DPIO_CHV_GAIN_CTRL_SHIFT 16
-#define DPIO_CHV_INT_COEFF_SHIFT 8
-#define DPIO_CHV_PROP_COEFF_SHIFT 0
-#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
-
-#define _CHV_PLL_DW8_CH0 0x8020
-#define _CHV_PLL_DW8_CH1 0x81A0
-#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0
-#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0)
-#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
-
-#define _CHV_PLL_DW9_CH0 0x8024
-#define _CHV_PLL_DW9_CH1 0x81A4
-#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */
-#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1)
-#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
-#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
-
-#define _CHV_CMN_DW0_CH0 0x8100
-#define DPIO_ALLDL_POWERDOWN_SHIFT_CH0 19
-#define DPIO_ANYDL_POWERDOWN_SHIFT_CH0 18
-#define DPIO_ALLDL_POWERDOWN (1 << 1)
-#define DPIO_ANYDL_POWERDOWN (1 << 0)
-
-#define _CHV_CMN_DW5_CH0 0x8114
-#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
-#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
-#define CHV_BUFRIGHTENA1_FORCE (3 << 20)
-#define CHV_BUFRIGHTENA1_MASK (3 << 20)
-#define CHV_BUFLEFTENA1_DISABLE (0 << 22)
-#define CHV_BUFLEFTENA1_NORMAL (1 << 22)
-#define CHV_BUFLEFTENA1_FORCE (3 << 22)
-#define CHV_BUFLEFTENA1_MASK (3 << 22)
-
-#define _CHV_CMN_DW13_CH0 0x8134
-#define _CHV_CMN_DW0_CH1 0x8080
-#define DPIO_CHV_S1_DIV_SHIFT 21
-#define DPIO_CHV_P1_DIV_SHIFT 13 /* 3 bits */
-#define DPIO_CHV_P2_DIV_SHIFT 8 /* 5 bits */
-#define DPIO_CHV_K_DIV_SHIFT 4
-#define DPIO_PLL_FREQLOCK (1 << 1)
-#define DPIO_PLL_LOCK (1 << 0)
-#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1)
-
-#define _CHV_CMN_DW14_CH0 0x8138
-#define _CHV_CMN_DW1_CH1 0x8084
-#define DPIO_AFC_RECAL (1 << 14)
-#define DPIO_DCLKP_EN (1 << 13)
-#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */
-#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */
-#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */
-#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */
-#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
-
-#define _CHV_CMN_DW19_CH0 0x814c
-#define _CHV_CMN_DW6_CH1 0x8098
-#define DPIO_ALLDL_POWERDOWN_SHIFT_CH1 30 /* CL2 DW6 only */
-#define DPIO_ANYDL_POWERDOWN_SHIFT_CH1 29 /* CL2 DW6 only */
-#define DPIO_DYNPWRDOWNEN_CH1 (1 << 28) /* CL2 DW6 only */
-#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
-
-#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
-
-#define CHV_CMN_DW28 0x8170
-#define DPIO_CL1POWERDOWNEN (1 << 23)
-#define DPIO_DYNPWRDOWNEN_CH0 (1 << 22)
-#define DPIO_SUS_CLK_CONFIG_ON (0 << 0)
-#define DPIO_SUS_CLK_CONFIG_CLKREQ (1 << 0)
-#define DPIO_SUS_CLK_CONFIG_GATE (2 << 0)
-#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ (3 << 0)
-
-#define CHV_CMN_DW30 0x8178
-#define DPIO_CL2_LDOFUSE_PWRENB (1 << 6)
-#define DPIO_LRC_BYPASS (1 << 3)
-
-#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
- (lane) * 0x200 + (offset))
-
-#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80)
-#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84)
-#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88)
-#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c)
-#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90)
-#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94)
-#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98)
-#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c)
-#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0)
-#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4)
-#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8)
-#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac)
-#define DPIO_FRC_LATENCY_SHFIT 8
-#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
-#define DPIO_UPAR_SHIFT 30
-
-/* BXT PHY registers */
-#define _BXT_PHY0_BASE 0x6C000
-#define _BXT_PHY1_BASE 0x162000
-#define _BXT_PHY2_BASE 0x163000
-#define BXT_PHY_BASE(phy) \
- _PICK_EVEN_2RANGES(phy, 1, \
- _BXT_PHY0_BASE, _BXT_PHY0_BASE, \
- _BXT_PHY1_BASE, _BXT_PHY2_BASE)
-
-#define _BXT_PHY(phy, reg) \
- _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
-
-#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
- (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
- (reg_ch1) - _BXT_PHY0_BASE))
-#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
- _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
-
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define MIPIO_RST_CTRL (1 << 2)
@@ -577,250 +216,6 @@
_PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
_PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
-/* BXT PHY PLL registers */
-#define _PORT_PLL_A 0x46074
-#define _PORT_PLL_B 0x46078
-#define _PORT_PLL_C 0x4607c
-#define PORT_PLL_ENABLE REG_BIT(31)
-#define PORT_PLL_LOCK REG_BIT(30)
-#define PORT_PLL_REF_SEL REG_BIT(27)
-#define PORT_PLL_POWER_ENABLE REG_BIT(26)
-#define PORT_PLL_POWER_STATE REG_BIT(25)
-#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
-
-#define _PORT_PLL_EBB_0_A 0x162034
-#define _PORT_PLL_EBB_0_B 0x6C034
-#define _PORT_PLL_EBB_0_C 0x6C340
-#define PORT_PLL_P1_MASK REG_GENMASK(15, 13)
-#define PORT_PLL_P1(p1) REG_FIELD_PREP(PORT_PLL_P1_MASK, (p1))
-#define PORT_PLL_P2_MASK REG_GENMASK(12, 8)
-#define PORT_PLL_P2(p2) REG_FIELD_PREP(PORT_PLL_P2_MASK, (p2))
-#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PLL_EBB_0_B, \
- _PORT_PLL_EBB_0_C)
-
-#define _PORT_PLL_EBB_4_A 0x162038
-#define _PORT_PLL_EBB_4_B 0x6C038
-#define _PORT_PLL_EBB_4_C 0x6C344
-#define PORT_PLL_RECALIBRATE REG_BIT(14)
-#define PORT_PLL_10BIT_CLK_ENABLE REG_BIT(13)
-#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PLL_EBB_4_B, \
- _PORT_PLL_EBB_4_C)
-
-#define _PORT_PLL_0_A 0x162100
-#define _PORT_PLL_0_B 0x6C100
-#define _PORT_PLL_0_C 0x6C380
-/* PORT_PLL_0_A */
-#define PORT_PLL_M2_INT_MASK REG_GENMASK(7, 0)
-#define PORT_PLL_M2_INT(m2_int) REG_FIELD_PREP(PORT_PLL_M2_INT_MASK, (m2_int))
-/* PORT_PLL_1_A */
-#define PORT_PLL_N_MASK REG_GENMASK(11, 8)
-#define PORT_PLL_N(n) REG_FIELD_PREP(PORT_PLL_N_MASK, (n))
-/* PORT_PLL_2_A */
-#define PORT_PLL_M2_FRAC_MASK REG_GENMASK(21, 0)
-#define PORT_PLL_M2_FRAC(m2_frac) REG_FIELD_PREP(PORT_PLL_M2_FRAC_MASK, (m2_frac))
-/* PORT_PLL_3_A */
-#define PORT_PLL_M2_FRAC_ENABLE REG_BIT(16)
-/* PORT_PLL_6_A */
-#define PORT_PLL_GAIN_CTL_MASK REG_GENMASK(18, 16)
-#define PORT_PLL_GAIN_CTL(x) REG_FIELD_PREP(PORT_PLL_GAIN_CTL_MASK, (x))
-#define PORT_PLL_INT_COEFF_MASK REG_GENMASK(12, 8)
-#define PORT_PLL_INT_COEFF(x) REG_FIELD_PREP(PORT_PLL_INT_COEFF_MASK, (x))
-#define PORT_PLL_PROP_COEFF_MASK REG_GENMASK(3, 0)
-#define PORT_PLL_PROP_COEFF(x) REG_FIELD_PREP(PORT_PLL_PROP_COEFF_MASK, (x))
-/* PORT_PLL_8_A */
-#define PORT_PLL_TARGET_CNT_MASK REG_GENMASK(9, 0)
-#define PORT_PLL_TARGET_CNT(x) REG_FIELD_PREP(PORT_PLL_TARGET_CNT_MASK, (x))
-/* PORT_PLL_9_A */
-#define PORT_PLL_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
-#define PORT_PLL_LOCK_THRESHOLD(x) REG_FIELD_PREP(PORT_PLL_LOCK_THRESHOLD_MASK, (x))
-/* PORT_PLL_10_A */
-#define PORT_PLL_DCO_AMP_OVR_EN_H REG_BIT(27)
-#define PORT_PLL_DCO_AMP_MASK REG_GENMASK(13, 10)
-#define PORT_PLL_DCO_AMP(x) REG_FIELD_PREP(PORT_PLL_DCO_AMP_MASK, (x))
-#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
- _PORT_PLL_0_B, \
- _PORT_PLL_0_C)
-#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
- (idx) * 4)
-
-/* BXT PHY common lane registers */
-#define _PORT_CL1CM_DW0_A 0x162000
-#define _PORT_CL1CM_DW0_BC 0x6C000
-#define PHY_POWER_GOOD (1 << 16)
-#define PHY_RESERVED (1 << 7)
-#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
-
-#define _PORT_CL1CM_DW9_A 0x162024
-#define _PORT_CL1CM_DW9_BC 0x6C024
-#define IREF0RC_OFFSET_SHIFT 8
-#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
-
-#define _PORT_CL1CM_DW10_A 0x162028
-#define _PORT_CL1CM_DW10_BC 0x6C028
-#define IREF1RC_OFFSET_SHIFT 8
-#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
-
-#define _PORT_CL1CM_DW28_A 0x162070
-#define _PORT_CL1CM_DW28_BC 0x6C070
-#define OCL1_POWER_DOWN_EN (1 << 23)
-#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
-#define SUS_CLK_CONFIG 0x3
-#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
-
-#define _PORT_CL1CM_DW30_A 0x162078
-#define _PORT_CL1CM_DW30_BC 0x6C078
-#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
-#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
-
-/* The spec defines this only for BXT PHY0, but let's assume that this
- * would exist for PHY1 too if it had a second channel.
- */
-#define _PORT_CL2CM_DW6_A 0x162358
-#define _PORT_CL2CM_DW6_BC 0x6C358
-#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
-#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
-
-/* BXT PHY Ref registers */
-#define _PORT_REF_DW3_A 0x16218C
-#define _PORT_REF_DW3_BC 0x6C18C
-#define GRC_DONE (1 << 22)
-#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
-
-#define _PORT_REF_DW6_A 0x162198
-#define _PORT_REF_DW6_BC 0x6C198
-#define GRC_CODE_SHIFT 24
-#define GRC_CODE_MASK (0xFF << GRC_CODE_SHIFT)
-#define GRC_CODE_FAST_SHIFT 16
-#define GRC_CODE_FAST_MASK (0xFF << GRC_CODE_FAST_SHIFT)
-#define GRC_CODE_SLOW_SHIFT 8
-#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
-#define GRC_CODE_NOM_MASK 0xFF
-#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
-
-#define _PORT_REF_DW8_A 0x1621A0
-#define _PORT_REF_DW8_BC 0x6C1A0
-#define GRC_DIS (1 << 15)
-#define GRC_RDY_OVRD (1 << 1)
-#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
-
-/* BXT PHY PCS registers */
-#define _PORT_PCS_DW10_LN01_A 0x162428
-#define _PORT_PCS_DW10_LN01_B 0x6C428
-#define _PORT_PCS_DW10_LN01_C 0x6C828
-#define _PORT_PCS_DW10_GRP_A 0x162C28
-#define _PORT_PCS_DW10_GRP_B 0x6CC28
-#define _PORT_PCS_DW10_GRP_C 0x6CE28
-#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW10_LN01_B, \
- _PORT_PCS_DW10_LN01_C)
-#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW10_GRP_B, \
- _PORT_PCS_DW10_GRP_C)
-
-#define TX2_SWING_CALC_INIT (1 << 31)
-#define TX1_SWING_CALC_INIT (1 << 30)
-
-#define _PORT_PCS_DW12_LN01_A 0x162430
-#define _PORT_PCS_DW12_LN01_B 0x6C430
-#define _PORT_PCS_DW12_LN01_C 0x6C830
-#define _PORT_PCS_DW12_LN23_A 0x162630
-#define _PORT_PCS_DW12_LN23_B 0x6C630
-#define _PORT_PCS_DW12_LN23_C 0x6CA30
-#define _PORT_PCS_DW12_GRP_A 0x162c30
-#define _PORT_PCS_DW12_GRP_B 0x6CC30
-#define _PORT_PCS_DW12_GRP_C 0x6CE30
-#define LANESTAGGER_STRAP_OVRD (1 << 6)
-#define LANE_STAGGER_MASK 0x1F
-#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW12_LN01_B, \
- _PORT_PCS_DW12_LN01_C)
-#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW12_LN23_B, \
- _PORT_PCS_DW12_LN23_C)
-#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW12_GRP_B, \
- _PORT_PCS_DW12_GRP_C)
-
-/* BXT PHY TX registers */
-#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
- ((lane) & 1) * 0x80)
-
-#define _PORT_TX_DW2_LN0_A 0x162508
-#define _PORT_TX_DW2_LN0_B 0x6C508
-#define _PORT_TX_DW2_LN0_C 0x6C908
-#define _PORT_TX_DW2_GRP_A 0x162D08
-#define _PORT_TX_DW2_GRP_B 0x6CD08
-#define _PORT_TX_DW2_GRP_C 0x6CF08
-#define BXT_PORT_TX_DW2_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW2_LN0_B, \
- _PORT_TX_DW2_LN0_C)
-#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW2_GRP_B, \
- _PORT_TX_DW2_GRP_C)
-#define MARGIN_000_SHIFT 16
-#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
-#define UNIQ_TRANS_SCALE_SHIFT 8
-#define UNIQ_TRANS_SCALE (0xFF << UNIQ_TRANS_SCALE_SHIFT)
-
-#define _PORT_TX_DW3_LN0_A 0x16250C
-#define _PORT_TX_DW3_LN0_B 0x6C50C
-#define _PORT_TX_DW3_LN0_C 0x6C90C
-#define _PORT_TX_DW3_GRP_A 0x162D0C
-#define _PORT_TX_DW3_GRP_B 0x6CD0C
-#define _PORT_TX_DW3_GRP_C 0x6CF0C
-#define BXT_PORT_TX_DW3_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW3_LN0_B, \
- _PORT_TX_DW3_LN0_C)
-#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW3_GRP_B, \
- _PORT_TX_DW3_GRP_C)
-#define SCALE_DCOMP_METHOD (1 << 26)
-#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
-
-#define _PORT_TX_DW4_LN0_A 0x162510
-#define _PORT_TX_DW4_LN0_B 0x6C510
-#define _PORT_TX_DW4_LN0_C 0x6C910
-#define _PORT_TX_DW4_GRP_A 0x162D10
-#define _PORT_TX_DW4_GRP_B 0x6CD10
-#define _PORT_TX_DW4_GRP_C 0x6CF10
-#define BXT_PORT_TX_DW4_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW4_LN0_B, \
- _PORT_TX_DW4_LN0_C)
-#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW4_GRP_B, \
- _PORT_TX_DW4_GRP_C)
-#define DEEMPH_SHIFT 24
-#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
-
-#define _PORT_TX_DW5_LN0_A 0x162514
-#define _PORT_TX_DW5_LN0_B 0x6C514
-#define _PORT_TX_DW5_LN0_C 0x6C914
-#define _PORT_TX_DW5_GRP_A 0x162D14
-#define _PORT_TX_DW5_GRP_B 0x6CD14
-#define _PORT_TX_DW5_GRP_C 0x6CF14
-#define BXT_PORT_TX_DW5_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW5_LN0_B, \
- _PORT_TX_DW5_LN0_C)
-#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW5_GRP_B, \
- _PORT_TX_DW5_GRP_C)
-#define DCC_DELAY_RANGE_1 (1 << 9)
-#define DCC_DELAY_RANGE_2 (1 << 8)
-
-#define _PORT_TX_DW14_LN0_A 0x162538
-#define _PORT_TX_DW14_LN0_B 0x6C538
-#define _PORT_TX_DW14_LN0_C 0x6C938
-#define LATENCY_OPTIM_SHIFT 30
-#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
-#define BXT_PORT_TX_DW14_LN(phy, ch, lane) \
- _MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B, \
- _PORT_TX_DW14_LN0_C) + \
- _BXT_LANE_OFFSET(lane))
-
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 _MMIO(0x4F074)
/* SKL VccIO mask */
@@ -1228,22 +623,6 @@
#define I915_ASLE_INTERRUPT (1 << 0)
#define I915_BSD_USER_INTERRUPT (1 << 25)
-#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
-#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
-
-/* DisplayPort Audio w/ LPE */
-#define VLV_AUD_CHICKEN_BIT_REG _MMIO(VLV_DISPLAY_BASE + 0x62F38)
-#define VLV_CHICKEN_BIT_DBG_ENABLE (1 << 0)
-
-#define _VLV_AUD_PORT_EN_B_DBG (VLV_DISPLAY_BASE + 0x62F20)
-#define _VLV_AUD_PORT_EN_C_DBG (VLV_DISPLAY_BASE + 0x62F30)
-#define _VLV_AUD_PORT_EN_D_DBG (VLV_DISPLAY_BASE + 0x62F34)
-#define VLV_AUD_PORT_EN_DBG(port) _MMIO_PORT3((port) - PORT_B, \
- _VLV_AUD_PORT_EN_B_DBG, \
- _VLV_AUD_PORT_EN_C_DBG, \
- _VLV_AUD_PORT_EN_D_DBG)
-#define VLV_AMP_MUTE (1 << 1)
-
#define GEN6_BSD_RNCID _MMIO(0x12198)
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
@@ -1264,109 +643,6 @@
#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1 << 4) /* Default */
#define GEN7_FF_DS_SCHED_HW (0x0 << 4)
-/*
- * Framebuffer compression (915+ only)
- */
-
-#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
-#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
-#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN REG_BIT(31)
-#define FBC_CTL_PERIODIC REG_BIT(30)
-#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
-#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
-#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
-#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
-#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */
-#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
-#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
-#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
-#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
-#define FBC_COMMAND _MMIO(0x320c)
-#define FBC_CMD_COMPRESS REG_BIT(0)
-#define FBC_STATUS _MMIO(0x3210)
-#define FBC_STAT_COMPRESSING REG_BIT(31)
-#define FBC_STAT_COMPRESSED REG_BIT(30)
-#define FBC_STAT_MODIFIED REG_BIT(29)
-#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0)
-#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */
-#define FBC_CTL_FENCE_DBL REG_BIT(4)
-#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2)
-#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0)
-#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1)
-#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2)
-#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3)
-#define FBC_CTL_CPU_FENCE_EN REG_BIT(1)
-#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0)
-#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane))
-#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */
-#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */
-#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1)
-#define FBC_MOD_NUM_VALID REG_BIT(0)
-#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 registers */
-#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */
-#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0)
-#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1)
-#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2)
-#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3)
-
-#define FBC_LL_SIZE (1536)
-
-/* Framebuffer compression for GM45+ */
-#define DPFC_CB_BASE _MMIO(0x3200)
-#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240)
-#define DPFC_CONTROL _MMIO(0x3208)
-#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248)
-#define DPFC_CTL_EN REG_BIT(31)
-#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */
-#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane))
-#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */
-#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */
-#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane))
-#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */
-#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */
-#define DPFC_CTL_PLANE_BINDING_MASK REG_GENMASK(12, 11) /* lnl+ */
-#define DPFC_CTL_PLANE_BINDING(plane_id) REG_FIELD_PREP(DPFC_CTL_PLANE_BINDING_MASK, (plane_id))
-#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */
-#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */
-#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */
-#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)
-#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0)
-#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)
-#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2)
-#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
-#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence))
-#define DPFC_RECOMP_CTL _MMIO(0x320c)
-#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c)
-#define DPFC_RECOMP_STALL_EN REG_BIT(27)
-#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16)
-#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0)
-#define DPFC_STATUS _MMIO(0x3210)
-#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250)
-#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16)
-#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0)
-#define DPFC_STATUS2 _MMIO(0x3214)
-#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254)
-#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0)
-#define DPFC_FENCE_YOFF _MMIO(0x3218)
-#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258)
-#define DPFC_CHICKEN _MMIO(0x3224)
-#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264)
-#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
-#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
-#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
-#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */
-#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
-
-#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268)
-#define FBC_STRIDE_OVERRIDE REG_BIT(15)
-#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
-#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
-
-#define ILK_FBC_RT_BASE _MMIO(0x2128)
-#define ILK_FBC_RT_VALID REG_BIT(0)
-#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
-
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
#define ILK_FBCQ_DIS REG_BIT(22)
#define ILK_PABSTRETCH_DIS REG_BIT(21)
@@ -1382,37 +658,18 @@
#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2)
#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3)
-
-/*
- * Framebuffer compression for Sandybridge
- *
- * The following two registers are of type GTTMMADR
- */
-#define SNB_DPFC_CTL_SA _MMIO(0x100100)
-#define SNB_DPFC_FENCE_EN REG_BIT(29)
-#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0)
-#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence))
-#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
-
-/* Framebuffer compression for Ivybridge */
-#define IVB_FBC_RT_BASE _MMIO(0x7020)
-#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
-
#define IPS_CTL _MMIO(0x43408)
#define IPS_ENABLE REG_BIT(31)
#define IPS_FALSE_COLOR REG_BIT(4)
-#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384)
-#define FBC_REND_NUKE REG_BIT(2)
-#define FBC_REND_CACHE_CLEAN REG_BIT(1)
-
/*
* Clock control & power management
*/
-#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
-#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
-#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
-#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
+#define _DPLL_A 0x6014
+#define _DPLL_B 0x6018
+#define _CHV_DPLL_C 0x6030
+#define DPLL(pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define VGA0 _MMIO(0x6000)
#define VGA1 _MMIO(0x6004)
@@ -1508,10 +765,11 @@
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
-#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
-#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
-#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
+#define _DPLL_A_MD 0x601c
+#define _DPLL_B_MD 0x6020
+#define _CHV_DPLL_C_MD 0x603c
+#define DPLL_MD(pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
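Both DPLL hunks apply the same refactor: the per-pipe offsets become bare constants, and the DISPLAY_MMIO_BASE(dev_priv) addend that used to be baked into each one moves into a base-taking _MMIO_BASE_PIPE3() lookup. A minimal userspace sketch of the mechanics, where PICK3() and MMIO_BASE_PIPE3() are illustrative stand-ins rather than the driver's actual macros:

#include <stdio.h>

/* pick one of three per-pipe offsets, then add the display MMIO base */
#define PICK3(i, a, b, c)	((i) == 0 ? (a) : (i) == 1 ? (b) : (c))
#define MMIO_BASE_PIPE3(base, pipe, a, b, c) \
	((base) + PICK3((pipe), (a), (b), (c)))

#define DPLL_A		0x6014
#define DPLL_B		0x6018
#define CHV_DPLL_C	0x6030

int main(void)
{
	unsigned int display_base = 0x180000;	/* e.g. VLV_DISPLAY_BASE */
	int pipe;

	for (pipe = 0; pipe < 3; pipe++)
		printf("DPLL(%d) = 0x%06x\n", pipe,
		       MMIO_BASE_PIPE3(display_base, pipe,
				       DPLL_A, DPLL_B, CHV_DPLL_C));
	return 0;
}

The gain is that the same offset constants now serve devices whose display ranges sit at different bases, instead of hard-wiring one device's base into every definition.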
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -1716,42 +974,10 @@
#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
-/*
- * Palette regs
- */
-#define _PALETTE_A 0xa000
-#define _PALETTE_B 0xa800
-#define _CHV_PALETTE_C 0xc000
-/* 8bit mode / i965+ 10.6 interpolated mode ldw/udw */
-#define PALETTE_RED_MASK REG_GENMASK(23, 16)
-#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
-#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
-/* pre-i965 10bit interpolated mode ldw */
-#define PALETTE_10BIT_RED_LDW_MASK REG_GENMASK(23, 16)
-#define PALETTE_10BIT_GREEN_LDW_MASK REG_GENMASK(15, 8)
-#define PALETTE_10BIT_BLUE_LDW_MASK REG_GENMASK(7, 0)
-/* pre-i965 10bit interpolated mode udw */
-#define PALETTE_10BIT_RED_EXP_MASK REG_GENMASK(23, 22)
-#define PALETTE_10BIT_RED_MANT_MASK REG_GENMASK(21, 18)
-#define PALETTE_10BIT_RED_UDW_MASK REG_GENMASK(17, 16)
-#define PALETTE_10BIT_GREEN_EXP_MASK REG_GENMASK(15, 14)
-#define PALETTE_10BIT_GREEN_MANT_MASK REG_GENMASK(13, 10)
-#define PALETTE_10BIT_GREEN_UDW_MASK REG_GENMASK(9, 8)
-#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
-#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
-#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
-#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
- _PICK_EVEN_2RANGES(pipe, 2, \
- _PALETTE_A, _PALETTE_B, \
- _CHV_PALETTE_C, _CHV_PALETTE_C) + \
- (i) * 4)
-
#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
-#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
-#define PVC_RP_STATE_CAP _MMIO(0x281014)
#define MTL_RP_STATE_CAP _MMIO(0x138000)
#define MTL_MEDIAP_STATE_CAP _MMIO(0x138020)
@@ -1911,18 +1137,18 @@
#define _PIPE_CRC_RES_4_B_IVB 0x61070
#define _PIPE_CRC_RES_5_B_IVB 0x61074
-#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_CTL_A)
-#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_1_A_IVB)
-#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_2_A_IVB)
-#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_3_A_IVB)
-#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_4_A_IVB)
-#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_5_A_IVB)
+#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_CTL_A)
+#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_1_A_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_2_A_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_3_A_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_4_A_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_5_A_IVB)
-#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RED_A)
-#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_GREEN_A)
-#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_BLUE_A)
-#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915)
-#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
+#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RED_A)
+#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_GREEN_A)
+#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_BLUE_A)
+#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RES1_A_I915)
+#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RES2_A_G4X)
/* Pipe/transcoder A timing regs */
#define _TRANS_HTOTAL_A 0x60000
@@ -1991,23 +1217,23 @@
#define _TRANS_VSYNC_DSI1 0x6b814
#define _TRANS_VSYNCSHIFT_DSI1 0x6b828
-#define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A)
-#define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A)
-#define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A)
-#define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A)
-#define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A)
-#define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A)
-#define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A)
-#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A)
-#define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC)
-#define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A)
+#define TRANS_HTOTAL(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
+#define TRANS_HBLANK(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
+#define TRANS_HSYNC(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
+#define TRANS_VTOTAL(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
+#define TRANS_VBLANK(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
+#define TRANS_VSYNC(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
+#define BCLRPAT(trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
+#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
+#define PIPESRC(pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
+#define TRANS_MULT(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
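This block, and the many _MMIO_TRANS2()/_MMIO_PIPE2()/_MMIO_CURSOR2() conversions that follow, are one mechanical change: the macros used to capture a variable named dev_priv from the caller's scope, and now take the device as an explicit first argument, so the register headers no longer depend on the caller's local naming. A toy illustration of the difference; the struct and offsets below are invented:

#include <stdio.h>

struct device_priv { unsigned int trans_offset[4]; };

/* old style: silently captures whatever 'dev_priv' is in scope */
#define OLD_TRANS2(tran, reg)		((reg) + dev_priv->trans_offset[(tran)])
/* new style: the device is an explicit parameter */
#define NEW_TRANS2(dp, tran, reg)	((reg) + (dp)->trans_offset[(tran)])

int main(void)
{
	struct device_priv priv = { .trans_offset = { 0, 0x1000, 0x2000, 0x3000 } };
	struct device_priv *dev_priv = &priv;	/* OLD_TRANS2() only compiles with this name around */

	printf("old: 0x%x\n", OLD_TRANS2(1, 0x60000));
	printf("new: 0x%x\n", NEW_TRANS2(&priv, 1, 0x60000));
	return 0;
}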
/* VRR registers */
#define _TRANS_VRR_CTL_A 0x60420
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_CTL_A)
#define VRR_CTL_VRR_ENABLE REG_BIT(31)
#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
@@ -2021,21 +1247,21 @@
#define _TRANS_VRR_VMAX_B 0x61424
#define _TRANS_VRR_VMAX_C 0x62424
#define _TRANS_VRR_VMAX_D 0x63424
-#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMAX_A)
+#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMAX_A)
#define VRR_VMAX_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_VMIN_A 0x60434
#define _TRANS_VRR_VMIN_B 0x61434
#define _TRANS_VRR_VMIN_C 0x62434
#define _TRANS_VRR_VMIN_D 0x63434
-#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMIN_A)
+#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMIN_A)
#define VRR_VMIN_MASK REG_GENMASK(15, 0)
#define _TRANS_VRR_VMAXSHIFT_A 0x60428
#define _TRANS_VRR_VMAXSHIFT_B 0x61428
#define _TRANS_VRR_VMAXSHIFT_C 0x62428
#define _TRANS_VRR_VMAXSHIFT_D 0x63428
-#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(trans, \
+#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(dev_priv, trans, \
_TRANS_VRR_VMAXSHIFT_A)
#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
#define VRR_VMAXSHIFT_DEC REG_BIT(16)
@@ -2045,7 +1271,7 @@
#define _TRANS_VRR_STATUS_B 0x6142C
#define _TRANS_VRR_STATUS_C 0x6242C
#define _TRANS_VRR_STATUS_D 0x6342C
-#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS_A)
+#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS_A)
#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
@@ -2065,7 +1291,7 @@
#define _TRANS_VRR_VTOTAL_PREV_B 0x61480
#define _TRANS_VRR_VTOTAL_PREV_C 0x62480
#define _TRANS_VRR_VTOTAL_PREV_D 0x63480
-#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(trans, \
+#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(dev_priv, trans, \
_TRANS_VRR_VTOTAL_PREV_A)
#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
@@ -2076,7 +1302,7 @@
#define _TRANS_VRR_FLIPLINE_B 0x61438
#define _TRANS_VRR_FLIPLINE_C 0x62438
#define _TRANS_VRR_FLIPLINE_D 0x63438
-#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(trans, \
+#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(dev_priv, trans, \
_TRANS_VRR_FLIPLINE_A)
#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
@@ -2084,17 +1310,24 @@
#define _TRANS_VRR_STATUS2_B 0x6143C
#define _TRANS_VRR_STATUS2_C 0x6243C
#define _TRANS_VRR_STATUS2_D 0x6343C
-#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS2_A)
+#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS2_A)
#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
#define _TRANS_PUSH_A 0x60A70
#define _TRANS_PUSH_B 0x61A70
#define _TRANS_PUSH_C 0x62A70
#define _TRANS_PUSH_D 0x63A70
-#define TRANS_PUSH(trans) _MMIO_TRANS2(trans, _TRANS_PUSH_A)
+#define TRANS_PUSH(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_PUSH_A)
#define TRANS_PUSH_EN REG_BIT(31)
#define TRANS_PUSH_SEND REG_BIT(30)
+#define _TRANS_VRR_VSYNC_A 0x60078
+#define TRANS_VRR_VSYNC(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VSYNC_A)
+#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16)
+#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end))
+#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0)
+#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start))
+
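TRANS_VRR_VSYNC is new here: a per-transcoder register carrying the VRR vsync window as two packed fields. A userspace sketch of how the value composes; GENMASK()/FIELD_PREP() below are simplified 32-bit stand-ins for the kernel's REG_* macros and the scanline numbers are invented:

#include <stdio.h>

#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v)	(((unsigned int)(v) << __builtin_ctz(mask)) & (mask))

#define VRR_VSYNC_END_MASK	GENMASK(28, 16)
#define VRR_VSYNC_START_MASK	GENMASK(12, 0)

int main(void)
{
	unsigned int start = 1084, end = 1092;	/* invented scanlines */
	unsigned int val = FIELD_PREP(VRR_VSYNC_END_MASK, end) |
			   FIELD_PREP(VRR_VSYNC_START_MASK, start);

	printf("TRANS_VRR_VSYNC = 0x%08x\n", val);
	return 0;
}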
/* VGA port control */
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
@@ -2312,6 +1545,7 @@
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_ASYNC_DATA_SIZE 36
#define VIDEO_DIP_GMP_DATA_SIZE 36
#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_PPS_DATA_SIZE 132
@@ -2350,6 +1584,8 @@
#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+/* ADL and later: */
+#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
/* Panel fitting */
#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
@@ -2588,6 +1824,9 @@
#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
+#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
+
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
@@ -2639,18 +1878,18 @@
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
-#define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF)
-#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
-#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH)
-#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
-#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)
+#define TRANSCONF(trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
+#define PIPEDSL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
+#define PIPEFRAME(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
+#define PIPESTAT(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
-#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4) /* u1.16 */
+#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(dev_priv, pipe, _PIPEAGCMAX + (i) * 4) /* u1.16 */
#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
-#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(pipe, _PIPE_ARB_CTL_A)
+#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
#define _PIPE_MISC_A 0x70030
@@ -2694,7 +1933,7 @@
#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
#define _ICL_PIPE_A_STATUS 0x70058
-#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(pipe, _ICL_PIPE_A_STATUS)
+#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(dev_priv, pipe, _ICL_PIPE_A_STATUS)
#define PIPE_STATUS_UNDERRUN REG_BIT(31)
#define PIPE_STATUS_SOFT_UNDERRUN_XELPD REG_BIT(28)
#define PIPE_STATUS_HARD_UNDERRUN_XELPD REG_BIT(27)
@@ -2969,8 +2208,8 @@
#define _WM0_PIPEA_ILK 0x45100
#define _WM0_PIPEB_ILK 0x45104
#define _WM0_PIPEC_IVB 0x45200
-#define WM0_PIPE_ILK(pipe) _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \
- _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
+#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
+ _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
@@ -3024,8 +2263,8 @@
/* GM45+ just has to be different */
#define _PIPEA_FRMCOUNT_G4X 0x70040
#define _PIPEA_FLIPCOUNT_G4X 0x70044
-#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
-#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
+#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
+#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
/* Cursor A & B regs */
#define _CURACNTR 0x70080
@@ -3053,6 +2292,7 @@
#define MCURSOR_MODE_DISABLE 0x00
#define MCURSOR_MODE_128_32B_AX 0x02
#define MCURSOR_MODE_256_32B_AX 0x03
+#define MCURSOR_MODE_64_2B 0x04
#define MCURSOR_MODE_64_32B_AX 0x07
#define MCURSOR_MODE_128_ARGB_AX (0x20 | MCURSOR_MODE_128_32B_AX)
#define MCURSOR_MODE_256_ARGB_AX (0x20 | MCURSOR_MODE_256_32B_AX)
@@ -3085,14 +2325,14 @@
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR)
-#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE)
-#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS)
-#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(pipe, _CURAPOS_ERLY_TPT)
-#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE)
-#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A)
-#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A)
-#define CURSURFLIVE(pipe) _MMIO_CURSOR2(pipe, _CURASURFLIVE)
+#define CURCNTR(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURACNTR)
+#define CURBASE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURABASE)
+#define CURPOS(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURAPOS)
+#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURAPOS_ERLY_TPT)
+#define CURSIZE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURASIZE)
+#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CUR_FBC_CTL_A)
+#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CUR_CHICKEN_A)
+#define CURSURFLIVE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURASURFLIVE)
/* Display A control */
#define _DSPAADDR_VLV 0x7017C /* vlv/chv */
@@ -3149,18 +2389,18 @@
#define _DSPASURFLIVE 0x701AC
#define _DSPAGAMC 0x701E0
-#define DSPADDR_VLV(plane) _MMIO_PIPE2(plane, _DSPAADDR_VLV)
-#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
-#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
-#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
-#define DSPPOS(plane) _MMIO_PIPE2(plane, _DSPAPOS)
-#define DSPSIZE(plane) _MMIO_PIPE2(plane, _DSPASIZE)
-#define DSPSURF(plane) _MMIO_PIPE2(plane, _DSPASURF)
-#define DSPTILEOFF(plane) _MMIO_PIPE2(plane, _DSPATILEOFF)
+#define DSPADDR_VLV(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAADDR_VLV)
+#define DSPCNTR(plane) _MMIO_PIPE2(dev_priv, plane, _DSPACNTR)
+#define DSPADDR(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAADDR)
+#define DSPSTRIDE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASTRIDE)
+#define DSPPOS(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAPOS)
+#define DSPSIZE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASIZE)
+#define DSPSURF(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASURF)
+#define DSPTILEOFF(plane) _MMIO_PIPE2(dev_priv, plane, _DSPATILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
-#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
-#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
-#define DSPGAMC(plane, i) _MMIO_PIPE2(plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
+#define DSPOFFSET(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAOFFSET)
+#define DSPSURFLIVE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASURFLIVE)
+#define DSPGAMC(plane, i) _MMIO_PIPE2(dev_priv, plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
@@ -3187,11 +2427,11 @@
#define PRIM_CONST_ALPHA_MASK REG_GENMASK(7, 0)
#define PRIM_CONST_ALPHA(alpha) REG_FIELD_PREP(PRIM_CONST_ALPHA_MASK, (alpha))
-#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
-#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
-#define PRIMPOS(plane) _MMIO_TRANS2(plane, _PRIMPOS_A)
-#define PRIMSIZE(plane) _MMIO_TRANS2(plane, _PRIMSIZE_A)
-#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(plane, _PRIMCNSTALPHA_A)
+#define CHV_BLEND(pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
+#define CHV_CANVAS(pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
+#define PRIMPOS(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMPOS_A)
+#define PRIMSIZE(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMSIZE_A)
+#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMCNSTALPHA_A)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3241,346 +2481,6 @@
#define _PIPEDSI0CONF 0x7b008
#define _PIPEDSI1CONF 0x7b808
-/* Sprite A control */
-#define _DVSACNTR 0x72180
-#define DVS_ENABLE REG_BIT(31)
-#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30)
-#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27)
-#define DVS_FORMAT_MASK REG_GENMASK(26, 25)
-#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0)
-#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1)
-#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2)
-#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3)
-#define DVS_PIPE_CSC_ENABLE REG_BIT(24)
-#define DVS_SOURCE_KEY REG_BIT(22)
-#define DVS_RGB_ORDER_XBGR REG_BIT(20)
-#define DVS_YUV_FORMAT_BT709 REG_BIT(18)
-#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16)
-#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0)
-#define DVS_YUV_ORDER_UYVY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1)
-#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2)
-#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3)
-#define DVS_ROTATE_180 REG_BIT(15)
-#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14)
-#define DVS_TILED REG_BIT(10)
-#define DVS_DEST_KEY REG_BIT(2)
-#define _DVSALINOFF 0x72184
-#define _DVSASTRIDE 0x72188
-#define _DVSAPOS 0x7218c
-#define DVS_POS_Y_MASK REG_GENMASK(31, 16)
-#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y))
-#define DVS_POS_X_MASK REG_GENMASK(15, 0)
-#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x))
-#define _DVSASIZE 0x72190
-#define DVS_HEIGHT_MASK REG_GENMASK(31, 16)
-#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h))
-#define DVS_WIDTH_MASK REG_GENMASK(15, 0)
-#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w))
-#define _DVSAKEYVAL 0x72194
-#define _DVSAKEYMSK 0x72198
-#define _DVSASURF 0x7219c
-#define DVS_ADDR_MASK REG_GENMASK(31, 12)
-#define _DVSAKEYMAXVAL 0x721a0
-#define _DVSATILEOFF 0x721a4
-#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16)
-#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y))
-#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0)
-#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x))
-#define _DVSASURFLIVE 0x721ac
-#define _DVSAGAMC_G4X 0x721e0 /* g4x */
-#define _DVSASCALE 0x72204
-#define DVS_SCALE_ENABLE REG_BIT(31)
-#define DVS_FILTER_MASK REG_GENMASK(30, 29)
-#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0)
-#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1)
-#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2)
-#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
-#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27)
-#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16)
-#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w))
-#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
-#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h))
-#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */
-#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */
-
-#define _DVSBCNTR 0x73180
-#define _DVSBLINOFF 0x73184
-#define _DVSBSTRIDE 0x73188
-#define _DVSBPOS 0x7318c
-#define _DVSBSIZE 0x73190
-#define _DVSBKEYVAL 0x73194
-#define _DVSBKEYMSK 0x73198
-#define _DVSBSURF 0x7319c
-#define _DVSBKEYMAXVAL 0x731a0
-#define _DVSBTILEOFF 0x731a4
-#define _DVSBSURFLIVE 0x731ac
-#define _DVSBGAMC_G4X 0x731e0 /* g4x */
-#define _DVSBSCALE 0x73204
-#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */
-#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */
-
-#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
-#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
-#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
-#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS)
-#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF)
-#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
-#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE)
-#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE)
-#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
-#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
-#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
-#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
-#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
-#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
-#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
-
-#define _SPRA_CTL 0x70280
-#define SPRITE_ENABLE REG_BIT(31)
-#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30)
-#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28)
-#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25)
-#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0)
-#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1)
-#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2)
-#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3)
-#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4)
-#define SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */
-#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24)
-#define SPRITE_SOURCE_KEY REG_BIT(22)
-#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */
-#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19)
-#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */
-#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16)
-#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0)
-#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1)
-#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2)
-#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3)
-#define SPRITE_ROTATE_180 REG_BIT(15)
-#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14)
-#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13)
-#define SPRITE_TILED REG_BIT(10)
-#define SPRITE_DEST_KEY REG_BIT(2)
-#define _SPRA_LINOFF 0x70284
-#define _SPRA_STRIDE 0x70288
-#define _SPRA_POS 0x7028c
-#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16)
-#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y))
-#define SPRITE_POS_X_MASK REG_GENMASK(15, 0)
-#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x))
-#define _SPRA_SIZE 0x70290
-#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16)
-#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h))
-#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0)
-#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w))
-#define _SPRA_KEYVAL 0x70294
-#define _SPRA_KEYMSK 0x70298
-#define _SPRA_SURF 0x7029c
-#define SPRITE_ADDR_MASK REG_GENMASK(31, 12)
-#define _SPRA_KEYMAX 0x702a0
-#define _SPRA_TILEOFF 0x702a4
-#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16)
-#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y))
-#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0)
-#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x))
-#define _SPRA_OFFSET 0x702a4
-#define _SPRA_SURFLIVE 0x702ac
-#define _SPRA_SCALE 0x70304
-#define SPRITE_SCALE_ENABLE REG_BIT(31)
-#define SPRITE_FILTER_MASK REG_GENMASK(30, 29)
-#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0)
-#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1)
-#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2)
-#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
-#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27)
-#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16)
-#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w))
-#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
-#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h))
-#define _SPRA_GAMC 0x70400
-#define _SPRA_GAMC16 0x70440
-#define _SPRA_GAMC17 0x7044c
-
-#define _SPRB_CTL 0x71280
-#define _SPRB_LINOFF 0x71284
-#define _SPRB_STRIDE 0x71288
-#define _SPRB_POS 0x7128c
-#define _SPRB_SIZE 0x71290
-#define _SPRB_KEYVAL 0x71294
-#define _SPRB_KEYMSK 0x71298
-#define _SPRB_SURF 0x7129c
-#define _SPRB_KEYMAX 0x712a0
-#define _SPRB_TILEOFF 0x712a4
-#define _SPRB_OFFSET 0x712a4
-#define _SPRB_SURFLIVE 0x712ac
-#define _SPRB_SCALE 0x71304
-#define _SPRB_GAMC 0x71400
-#define _SPRB_GAMC16 0x71440
-#define _SPRB_GAMC17 0x7144c
-
-#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
-#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
-#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
-#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS)
-#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
-#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
-#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
-#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
-#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
-#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
-#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
-#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
-#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
-#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
-#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
-#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
-
-#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
-#define SP_ENABLE REG_BIT(31)
-#define SP_PIPE_GAMMA_ENABLE REG_BIT(30)
-#define SP_FORMAT_MASK REG_GENMASK(29, 26)
-#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0)
-#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2)
-#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5)
-#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6)
-#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7)
-#define SP_FORMAT_RGBX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 8)
-#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9)
-#define SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */
-#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */
-#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14)
-#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15)
-#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */
-#define SP_SOURCE_KEY REG_BIT(22)
-#define SP_YUV_FORMAT_BT709 REG_BIT(18)
-#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16)
-#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0)
-#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1)
-#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2)
-#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3)
-#define SP_ROTATE_180 REG_BIT(15)
-#define SP_TILED REG_BIT(10)
-#define SP_MIRROR REG_BIT(8) /* CHV pipe B */
-#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
-#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
-#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
-#define SP_POS_Y_MASK REG_GENMASK(31, 16)
-#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y))
-#define SP_POS_X_MASK REG_GENMASK(15, 0)
-#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x))
-#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
-#define SP_HEIGHT_MASK REG_GENMASK(31, 16)
-#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h))
-#define SP_WIDTH_MASK REG_GENMASK(15, 0)
-#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w))
-#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
-#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
-#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
-#define SP_ADDR_MASK REG_GENMASK(31, 12)
-#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
-#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
-#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16)
-#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y))
-#define SP_OFFSET_X_MASK REG_GENMASK(15, 0)
-#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x))
-#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
-#define SP_CONST_ALPHA_ENABLE REG_BIT(31)
-#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0)
-#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha))
-#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac)
-#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
-#define SP_CONTRAST_MASK REG_GENMASK(26, 18)
-#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */
-#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0)
-#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */
-#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
-#define SP_SH_SIN_MASK REG_GENMASK(26, 16)
-#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */
-#define SP_SH_COS_MASK REG_GENMASK(9, 0)
-#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */
-#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0)
-
-#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
-#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
-#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
-#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
-#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
-#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
-#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
-#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
-#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
-#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
-#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
-#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac)
-#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
-#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
-#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
-
-#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
- _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
-#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
- _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
-
-#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
-#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
-#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE)
-#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS)
-#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE)
-#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL)
-#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK)
-#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF)
-#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
-#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
-#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
-#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE)
-#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
-#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
-#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
-
-/*
- * CHV pipe B sprite CSC
- *
- * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
- * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
- * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff|
- */
-#define _MMIO_CHV_SPCSC(plane_id, reg) \
- _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg))
-
-#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900)
-#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904)
-#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908)
-#define SPCSC_OOFF_MASK REG_GENMASK(26, 16)
-#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, (x) & 0x7ff) /* s11 */
-#define SPCSC_IOFF_MASK REG_GENMASK(10, 0)
-#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */
-
-#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c)
-#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910)
-#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914)
-#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918)
-#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c)
-#define SPCSC_C1_MASK REG_GENMASK(30, 16)
-#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */
-#define SPCSC_C0_MASK REG_GENMASK(14, 0)
-#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */
-
-#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920)
-#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924)
-#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928)
-#define SPCSC_IMAX_MASK REG_GENMASK(26, 16)
-#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */
-#define SPCSC_IMIN_MASK REG_GENMASK(10, 0)
-#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */
-
-#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c)
-#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930)
-#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934)
-#define SPCSC_OMAX_MASK REG_GENMASK(25, 16)
-#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */
-#define SPCSC_OMIN_MASK REG_GENMASK(9, 0)
-#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */
-
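Note: the DVS/SPR/SP sprite-plane definitions and the CHV sprite CSC block removed here track the new `#include "display/intel_sprite_regs.h"` added to intel_gvt_mmio_table.c near the end of this diff, so these registers appear to move into a dedicated intel_sprite_regs.h header rather than being dropped.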
/* Skylake plane registers */
#define _PLANE_CTL_1_A 0x70180
@@ -3990,14 +2890,14 @@
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M1)
-#define PIPE_DATA_N1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N1)
-#define PIPE_DATA_M2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M2)
-#define PIPE_DATA_N2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N2)
-#define PIPE_LINK_M1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M1)
-#define PIPE_LINK_N1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N1)
-#define PIPE_LINK_M2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M2)
-#define PIPE_LINK_N2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N2)
+#define PIPE_DATA_M1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
+#define PIPE_DATA_N1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
+#define PIPE_DATA_M2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
+#define PIPE_DATA_N2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
+#define PIPE_LINK_M1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
+#define PIPE_LINK_N1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
+#define PIPE_LINK_M2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
+#define PIPE_LINK_N2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@@ -4555,6 +3455,11 @@
#define GLK_CL1_PWR_DOWN REG_BIT(11)
#define GLK_CL0_PWR_DOWN REG_BIT(10)
+#define CHICKEN_MISC_3 _MMIO(0x42088)
+#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
+#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
+#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
+
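CHICKEN_MISC_3 packs three DP MST workarounds as per-transcoder bit runs starting at bits 9, 5 and 0, so transcoders A through D each own one bit per workaround (runs 0-3, 5-8 and 9-12, leaving bit 4 and bits 13+ free). A small sketch of the bit arithmetic; the transcoder enum is an assumption matching the driver's TRANSCODER_A..D ordering:

#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_D };

#define BIT(n)				(1u << (n))
#define DP_MST_DPT_DPTP_ALIGN_WA(t)	BIT(9 + (t) - TRANSCODER_A)
#define DP_MST_SHORT_HBLANK_WA(t)	BIT(5 + (t) - TRANSCODER_A)
#define DP_MST_FEC_BS_JITTER_WA(t)	BIT(0 + (t) - TRANSCODER_A)

int main(void)
{
	enum transcoder t;

	for (t = TRANSCODER_A; t <= TRANSCODER_D; t++)
		printf("trans %d: align=%#07x hblank=%#05x jitter=%#04x\n",
		       (int)t, DP_MST_DPT_DPTP_ALIGN_WA(t),
		       DP_MST_SHORT_HBLANK_WA(t), DP_MST_FEC_BS_JITTER_WA(t));
	return 0;
}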
#define CHICKEN_MISC_4 _MMIO(0x4208c)
#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
@@ -4599,7 +3504,7 @@
#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
-#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
@@ -4611,7 +3516,9 @@
#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
+#define DP_FEC_BS_JITTER_WA REG_BIT(15)
#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
+#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
@@ -5010,27 +3917,29 @@
#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
/* Per-transcoder DIP controls (VLV) */
-#define _VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
-#define _VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
-
-#define _VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
-#define _VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
-
-#define _CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
-#define _CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
-#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
-
-#define VLV_TVIDEO_DIP_CTL(pipe) \
- _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_CTL_A, \
- _VLV_VIDEO_DIP_CTL_B, _CHV_VIDEO_DIP_CTL_C)
-#define VLV_TVIDEO_DIP_DATA(pipe) \
- _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_DATA_A, \
- _VLV_VIDEO_DIP_DATA_B, _CHV_VIDEO_DIP_DATA_C)
-#define VLV_TVIDEO_DIP_GCP(pipe) \
- _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
- _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
+#define _VLV_VIDEO_DIP_CTL_A 0x60200
+#define _VLV_VIDEO_DIP_CTL_B 0x61170
+#define _CHV_VIDEO_DIP_CTL_C 0x611f0
+#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_CTL_A, \
+ _VLV_VIDEO_DIP_CTL_B, \
+ _CHV_VIDEO_DIP_CTL_C)
+
+#define _VLV_VIDEO_DIP_DATA_A 0x60208
+#define _VLV_VIDEO_DIP_DATA_B 0x61174
+#define _CHV_VIDEO_DIP_DATA_C 0x611f4
+#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_DATA_A, \
+ _VLV_VIDEO_DIP_DATA_B, \
+ _CHV_VIDEO_DIP_DATA_C)
+
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
+#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
+ _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
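Same conversion as the DPLL hunks earlier in the file: the three per-pipe DIP offsets drop their baked-in VLV_DISPLAY_BASE addend, which now arrives as the explicit base argument of _MMIO_BASE_PIPE3(), and the constants are regrouped per register instead of per pipe.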
/* Haswell DIP controls */
@@ -5040,6 +3949,7 @@
#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
@@ -5054,6 +3964,7 @@
#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
@@ -5073,22 +3984,25 @@
#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
-#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
-#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
-#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
-#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
-#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
-#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+/* ADLP and later: */
+#define ADL_TVIDEO_DIP_AS_SDP_DATA(trans, i)	_MMIO_TRANS2(dev_priv, trans, \
+						_ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
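The adaptive-sync (AS) SDP pieces in this diff fit together: VIDEO_DIP_ASYNC_DATA_SIZE (36 bytes) from the earlier hunk sizes the payload, VIDEO_DIP_ENABLE_AS_ADL gates it in the DIP control register, and the per-transcoder data registers added here receive the payload a dword at a time. A hedged sketch of the write pattern; the header byte and the bare-offset write helper are illustrative, not the driver's code, which would go through intel_de_write() on ADL_TVIDEO_DIP_AS_SDP_DATA(trans, i):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define VIDEO_DIP_ASYNC_DATA_SIZE	36		/* from the hunk further up */
#define AS_DATA_A			0x60484		/* _ADL_VIDEO_DIP_AS_DATA_A */

static void mmio_write(unsigned int reg, uint32_t val)	/* stand-in */
{
	printf("write 0x%05x = 0x%08x\n", reg, val);
}

int main(void)
{
	uint8_t sdp[VIDEO_DIP_ASYNC_DATA_SIZE] = { 0x22 };	/* illustrative header byte */
	uint32_t dw;
	int i;

	/* 36 bytes of payload = 9 dword registers spaced 4 bytes apart */
	for (i = 0; i < VIDEO_DIP_ASYNC_DATA_SIZE / 4; i++) {
		memcpy(&dw, &sdp[i * 4], sizeof(dw));
		mmio_write(AS_DATA_A + i * 4, dw);
	}
	return 0;
}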
#define _HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1 << 31)
#define _HSW_STEREO_3D_CTL_B 0x71020
-#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
+#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
@@ -5401,7 +4315,7 @@
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
-#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* xehpsdv, pvc */
+#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
@@ -5566,15 +4480,6 @@ enum skl_power_gate {
((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
-#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
-#define _ICL_AUX_ANAOVRD1_A 0x162398
-#define _ICL_AUX_ANAOVRD1_B 0x6C398
-#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
- _ICL_AUX_ANAOVRD1_A, \
- _ICL_AUX_ANAOVRD1_B))
-#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
-#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
-
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -5583,7 +4488,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
-#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
+#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
#define TRANS_DDI_FUNC_ENABLE (1 << 31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
@@ -5638,7 +4543,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
-#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL2_A)
+#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
@@ -5651,7 +4556,7 @@ enum skl_power_gate {
#define _DP_TP_CTL_B 0x64140
#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A)
+#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
#define DP_TP_CTL_ENABLE (1 << 31)
#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
@@ -5677,7 +4582,7 @@ enum skl_power_gate {
#define _DP_TP_STATUS_B 0x64144
#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A)
+#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
@@ -5858,14 +4763,14 @@ enum skl_power_gate {
#define _TRANSB_MSA_MISC 0x61410
#define _TRANSC_MSA_MISC 0x62410
#define _TRANS_EDP_MSA_MISC 0x6f410
-#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
+#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
/* See DP_MSA_MISC_* for the bit definitions */
#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
-#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(tran, _TRANS_A_SET_CONTEXT_LATENCY)
+#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
@@ -5900,7 +4805,9 @@ enum skl_power_gate {
#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
-#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_BIT(25)
+#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
+#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
+#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
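The MDCLK_SOURCE_SEL change just above replaces a bare REG_BIT() with a one-bit mask plus two REG_FIELD_PREP() values, which makes both selector states nameable and lets readback code use the matching field-extract helper. A userspace sketch with simplified stand-ins for the kernel macros:

#include <stdio.h>

#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v)	(((unsigned int)(v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define MDCLK_SOURCE_SEL_MASK		GENMASK(25, 25)	/* one-bit field */
#define MDCLK_SOURCE_SEL_CD2XCLK	FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
#define MDCLK_SOURCE_SEL_CDCLK_PLL	FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)

int main(void)
{
	unsigned int cdclk_ctl = MDCLK_SOURCE_SEL_CDCLK_PLL;

	printf("mdclk source = %s\n",
	       FIELD_GET(MDCLK_SOURCE_SEL_MASK, cdclk_ctl) ?
	       "cdclk pll" : "cd2xclk");
	return 0;
}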
@@ -6317,7 +5224,7 @@ enum skl_power_gate {
#define _VLV_PIPE_MSA_MISC_A 0x70048
#define VLV_PIPE_MSA_MISC(pipe) \
- _MMIO_PIPE2(pipe, _VLV_PIPE_MSA_MISC_A)
+ _MMIO_PIPE2(dev_priv, pipe, _VLV_PIPE_MSA_MISC_A)
#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
@@ -6390,7 +5297,7 @@ enum skl_power_gate {
#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
-#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(trans, _MTL_CLKGATE_DIS_TRANS_A)
+#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 0d735d5c2b35..942345548bc3 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -126,7 +126,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
return 0;
err_free_blocks:
- drm_buddy_free_list(mm, &bman_res->blocks);
+ drm_buddy_free_list(mm, &bman_res->blocks, 0);
mutex_unlock(&bman->lock);
err_free_res:
ttm_resource_fini(man, &bman_res->base);
@@ -141,7 +141,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
mutex_lock(&bman->lock);
- drm_buddy_free_list(&bman->mm, &bman_res->blocks);
+ drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0);
bman->visible_avail += bman_res->used_visible_size;
mutex_unlock(&bman->lock);
@@ -345,7 +345,7 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
ttm_set_driver_manager(bdev, type, NULL);
mutex_lock(&bman->lock);
- drm_buddy_free_list(mm, &bman->reserved);
+ drm_buddy_free_list(mm, &bman->reserved, 0);
drm_buddy_fini(mm);
bman->visible_avail += bman->visible_reserved;
WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
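All three buddy-manager call sites gain a third argument and pass 0, keeping the old behaviour. This follows a drm_buddy API change in the same series that lets callers annotate blocks as they are freed; if memory serves it is the cleared-pages tracking work, where a caller that knows the memory is already zeroed passes a DRM_BUDDY_CLEARED flag so the allocator can reuse it without re-clearing. Treat that flag name as an assumption; only the 0 (no flags) usage is visible in this diff.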
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index b45ef0560611..06ec6ceb61d5 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -73,20 +73,6 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
-#if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows_t(T, A, B) \
- __builtin_add_overflow_p((A), (B), (T)0)
-#else
-#define add_overflows_t(T, A, B) ({ \
- typeof(A) a = (A); \
- typeof(B) b = (B); \
- (T)(a + b) < a; \
-})
-#endif
-
-#define add_overflows(A, B) \
- add_overflows_t(typeof((A) + (B)), (A), (B))
-
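The removed add_overflows()/add_overflows_t() helpers duplicated what the compiler and <linux/overflow.h> already provide, and presumably no callers remained. For reference, a small userspace sketch of the same check using the __builtin_add_overflow() primitive that backs the kernel's check_add_overflow():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t a = 0xfffffff0u, b = 0x20u, sum;

	/* same test the removed macro performed: does a + b wrap? */
	if (__builtin_add_overflow(a, b, &sum))
		printf("overflow: 0x%x + 0x%x wraps\n", a, b);
	else
		printf("ok: 0x%x\n", sum);
	return 0;
}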
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d09aad34ba37..d2f064d2525c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -34,6 +34,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
@@ -103,12 +104,42 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
- return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_tryget(vma))
+ return -ENOENT;
+
+ /*
+ * Exclude global GTT VMA from holding a GT wakeref
+ * while active, otherwise GPU never goes idle.
+ */
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we and our _retire() counterpart can be
+ * called asynchronously, storing a wakeref tracking
+ * handle inside struct i915_vma is not safe, and
+ * there is no other good place for that. Hence,
+ * use untracked variants of intel_gt_pm_get/put().
+ */
+ intel_gt_pm_get_untracked(vma->vm->gt);
+ }
+
+ return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
- i915_vma_put(active_to_vma(ref));
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we can be called from atomic contexts,
+ * use an async variant of intel_gt_pm_put().
+ */
+ intel_gt_pm_put_async_untracked(vma->vm->gt);
+ }
+
+ i915_vma_put(vma);
}
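A usage note on the pairing above: __i915_vma_retire() may run from atomic context, so it cannot take the sleeping path that dropping the last GT wakeref can involve; as I read the wakeref helpers, the async put defers that final power-down to a worker in that case. The untracked variants skip the wakeref-tracking bookkeeping because, per the comment in __i915_vma_active(), there is no safe place in struct i915_vma to store a tracking handle.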
static struct i915_vma *
@@ -1404,7 +1435,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref;
unsigned int bound;
int err;
@@ -1424,8 +1455,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
- if (flags & PIN_GLOBAL)
- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+ /*
+ * In case of a global GTT, we must hold a runtime-pm wakeref
+ * while global PTEs are updated. In other cases, we hold
+ * the rpm reference while the VMA is active. Since runtime
+ * resume may require allocations, which are forbidden inside
+ * vm->mutex, get the first rpm wakeref outside of the mutex.
+ */
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
@@ -1561,8 +1598,7 @@ err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
- if (wakeref)
- intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
@@ -1740,8 +1776,6 @@ static void release_references(struct i915_vma *vma, struct intel_gt *gt,
if (vm_ddestroy)
i915_vm_resv_put(vma->vm);
- /* Wait for async active retire */
- i915_active_wait(&vma->active);
i915_active_fini(&vma->active);
GEM_WARN_ON(vma->resource);
i915_vma_free(vma);
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 9c21ce69bd98..1dc5281b2ade 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -28,6 +28,7 @@
#include "display/intel_de.h"
#include "display/intel_display.h"
#include "display/intel_display_trace.h"
+#include "display/intel_fbc_regs.h"
#include "display/skl_watermark.h"
#include "gt/intel_engine_regs.h"
@@ -105,12 +106,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: bxt
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcHighMemBwCorruptionAvoidance:bxt
- * Display WA #0883: bxt
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0);
}
static void glk_init_clock_gating(struct drm_i915_private *i915)
@@ -349,13 +344,6 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *i915,
intel_uncore_write(&i915->uncore, GEN7_MISCCPCTL, misccpctl);
}
-static void xehpsdv_init_clock_gating(struct drm_i915_private *i915)
-{
- /* Wa_22010146351:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-}
-
static void dg2_init_clock_gating(struct drm_i915_private *i915)
{
/* Wa_22010954014:dg2 */
@@ -363,17 +351,6 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
SGSI_SIDECLK_DIS);
}
-static void pvc_init_clock_gating(struct drm_i915_private *i915)
-{
- /* Wa_14012385139:pvc */
- if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-
- /* Wa_22010954014:pvc */
- if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
-}
-
static void cnp_init_clock_gating(struct drm_i915_private *i915)
{
if (!HAS_PCH_CNP(i915))
@@ -396,13 +373,6 @@ static void cfl_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: cfl
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:cfl
- * Display WA #0873: cfl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void kbl_init_clock_gating(struct drm_i915_private *i915)
@@ -427,13 +397,6 @@ static void kbl_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: kbl
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:kbl
- * Display WA #0873: kbl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skl_init_clock_gating(struct drm_i915_private *i915)
@@ -452,19 +415,6 @@ static void skl_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: skl
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:skl
- * Display WA #0873: skl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
-
- /*
- * WaFbcHighMemBwCorruptionAvoidance:skl
- * Display WA #0883: skl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0);
}
static void bdw_init_clock_gating(struct drm_i915_private *i915)
@@ -762,9 +712,7 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs =
.init_clock_gating = platform##_init_clock_gating, \
}
-CG_FUNCS(pvc);
CG_FUNCS(dg2);
-CG_FUNCS(xehpsdv);
CG_FUNCS(cfl);
CG_FUNCS(skl);
CG_FUNCS(kbl);
@@ -797,12 +745,8 @@ CG_FUNCS(nop);
*/
void intel_clock_gating_hooks_init(struct drm_i915_private *i915)
{
- if (IS_PONTEVECCHIO(i915))
- i915->clock_gating_funcs = &pvc_clock_gating_funcs;
- else if (IS_DG2(i915))
+ if (IS_DG2(i915))
i915->clock_gating_funcs = &dg2_clock_gating_funcs;
- else if (IS_XEHPSDV(i915))
- i915->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
i915->clock_gating_funcs = &cfl_clock_gating_funcs;
else if (IS_SKYLAKE(i915))
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 59bea1398c91..a0a43ea07f11 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -70,9 +70,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(DG1),
PLATFORM_NAME(ALDERLAKE_S),
PLATFORM_NAME(ALDERLAKE_P),
- PLATFORM_NAME(XEHPSDV),
PLATFORM_NAME(DG2),
- PLATFORM_NAME(PONTEVECCHIO),
PLATFORM_NAME(METEORLAKE),
};
#undef PLATFORM_NAME
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index eba2f0b919c8..d1a2abc7e513 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -87,9 +87,7 @@ enum intel_platform {
INTEL_DG1,
INTEL_ALDERLAKE_S,
INTEL_ALDERLAKE_P,
- INTEL_XEHPSDV,
INTEL_DG2,
- INTEL_PONTEVECCHIO,
INTEL_METEORLAKE,
INTEL_MAX_PLATFORMS
};
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 87ecc5104fd9..e1a35f70b544 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -3,6 +3,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_audio_regs.h"
#include "display/intel_backlight_regs.h"
#include "display/intel_color_regs.h"
@@ -10,9 +11,11 @@
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_fbc_regs.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_lvds_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sprite_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_engine_regs.h"
@@ -1155,11 +1158,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1));
@@ -1180,11 +1183,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1));
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1));
- MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1));
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1));
@@ -1205,11 +1208,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1));
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index d4e844128826..2d0647aca964 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -272,15 +272,11 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @rpm: the intel_runtime_pm structure
*
- * This function grabs a device-level runtime pm reference (mostly used for GEM
- * code to ensure the GTT or GT is on).
+ * This function grabs a device-level runtime pm reference.
*
- * It will _not_ power up the device but instead only check that it's powered
- * on. Therefore it is only valid to call this functions from contexts where
- * the device is known to be powered up and where trying to power it up would
- * result in hilarity and deadlocks. That pretty much means only the system
- * suspend/resume code where this is used to grab runtime pm references for
- * delayed setup down in work items.
+ * It will _not_ resume the device but instead only takes an extra wakeref.
+ * Therefore it is only valid to call this function from contexts where
+ * the device is known to be active and another wakeref is already held.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
@@ -289,7 +285,7 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
*/
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
- assert_rpm_wakelock_held(rpm);
+ assert_rpm_raw_wakeref_held(rpm);
pm_runtime_get_noresume(rpm->kdev);
intel_runtime_pm_acquire(rpm, true);
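The reworked kerneldoc above narrows the contract of intel_runtime_pm_get_noresume(): it no longer merely checks that the device is powered, it requires another wakeref already held and only bumps the reference count (matching the switch to assert_rpm_raw_wakeref_held()). A minimal sketch of the intended calling pattern, assuming an ordinary driver path; the helper name and flow here are illustrative, not part of the patch:

static void noresume_usage_sketch(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t outer, extra;

	/* Resume the device and hold a full wakeref. */
	outer = intel_runtime_pm_get(rpm);

	/*
	 * While @outer is held, take an extra reference without touching
	 * the device's power state, e.g. to hand off to deferred work.
	 */
	extra = intel_runtime_pm_get_noresume(rpm);

	/* ... the deferred work eventually drops its reference ... */
	intel_runtime_pm_put(rpm, extra);

	intel_runtime_pm_put(rpm, outer);
}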
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index b4162f1be765..a5adfb5d8fd2 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -102,13 +102,6 @@ static const struct intel_step_info adlp_revids[] = {
[0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
};
-static const struct intel_step_info xehpsdv_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0) },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1) },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0) },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0) },
-};
-
static const struct intel_step_info dg2_g10_revid_step_tbl[] = {
[0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
[0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 },
@@ -153,8 +146,6 @@ static u8 gmd_to_intel_step(struct drm_i915_private *i915,
return step;
}
-static void pvc_step_init(struct drm_i915_private *i915, int pci_revid);
-
void intel_step_init(struct drm_i915_private *i915)
{
const struct intel_step_info *revids = NULL;
@@ -178,10 +169,7 @@ void intel_step_init(struct drm_i915_private *i915)
return;
}
- if (IS_PONTEVECCHIO(i915)) {
- pvc_step_init(i915, revid);
- return;
- } else if (IS_DG2_G10(i915)) {
+ if (IS_DG2_G10(i915)) {
revids = dg2_g10_revid_step_tbl;
size = ARRAY_SIZE(dg2_g10_revid_step_tbl);
} else if (IS_DG2_G11(i915)) {
@@ -190,9 +178,6 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_DG2_G12(i915)) {
revids = dg2_g12_revid_step_tbl;
size = ARRAY_SIZE(dg2_g12_revid_step_tbl);
- } else if (IS_XEHPSDV(i915)) {
- revids = xehpsdv_revids;
- size = ARRAY_SIZE(xehpsdv_revids);
} else if (IS_ALDERLAKE_P_N(i915)) {
revids = adlp_n_revids;
size = ARRAY_SIZE(adlp_n_revids);
@@ -277,69 +262,6 @@ void intel_step_init(struct drm_i915_private *i915)
RUNTIME_INFO(i915)->step = step;
}
-#define PVC_BD_REVID GENMASK(5, 3)
-#define PVC_CT_REVID GENMASK(2, 0)
-
-static const int pvc_bd_subids[] = {
- [0x0] = STEP_A0,
- [0x3] = STEP_B0,
- [0x4] = STEP_B1,
- [0x5] = STEP_B3,
-};
-
-static const int pvc_ct_subids[] = {
- [0x3] = STEP_A0,
- [0x5] = STEP_B0,
- [0x6] = STEP_B1,
- [0x7] = STEP_C0,
-};
-
-static int
-pvc_step_lookup(struct drm_i915_private *i915, const char *type,
- const int *table, int size, int subid)
-{
- if (subid < size && table[subid] != STEP_NONE)
- return table[subid];
-
- drm_warn(&i915->drm, "Unknown %s id 0x%02x\n", type, subid);
-
- /*
- * As on other platforms, try to use the next higher ID if we land on a
- * gap in the table.
- */
- while (subid < size && table[subid] == STEP_NONE)
- subid++;
-
- if (subid < size) {
- drm_dbg(&i915->drm, "Using steppings for %s id 0x%02x\n",
- type, subid);
- return table[subid];
- }
-
- drm_dbg(&i915->drm, "Using future steppings\n");
- return STEP_FUTURE;
-}
-
-/*
- * PVC needs special handling since we don't lookup the
- * revid in a table, but rather specific bitfields within
- * the revid for various components.
- */
-static void pvc_step_init(struct drm_i915_private *i915, int pci_revid)
-{
- int ct_subid, bd_subid;
-
- bd_subid = FIELD_GET(PVC_BD_REVID, pci_revid);
- ct_subid = FIELD_GET(PVC_CT_REVID, pci_revid);
-
- RUNTIME_INFO(i915)->step.basedie_step =
- pvc_step_lookup(i915, "Base Die", pvc_bd_subids,
- ARRAY_SIZE(pvc_bd_subids), bd_subid);
- RUNTIME_INFO(i915)->step.graphics_step =
- pvc_step_lookup(i915, "Compute Tile", pvc_ct_subids,
- ARRAY_SIZE(pvc_ct_subids), ct_subid);
-}
-
#define STEP_NAME_CASE(name) \
case STEP_##name: \
return #name;
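For reference, the dropped pvc_step_init() did not index a revid table directly; it decoded two sub-fields out of a single PCI revision. A worked decode for a hypothetical revid of 0x2b (the value is chosen purely for illustration, using the tables removed above):

	/* 0x2b = 0b101011 */
	bd_subid = FIELD_GET(GENMASK(5, 3), 0x2b);	/* bits 5:3 = 0x5 -> pvc_bd_subids[0x5] = STEP_B3 */
	ct_subid = FIELD_GET(GENMASK(2, 0), 0x2b);	/* bits 2:0 = 0x3 -> pvc_ct_subids[0x3] = STEP_A0 */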
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 76400e9c40f0..729409a4bada 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1106,45 +1106,6 @@ static const struct i915_range dg2_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
-static const struct i915_range pvc_shadowed_regs[] = {
- { .start = 0x2030, .end = 0x2030 },
- { .start = 0x2510, .end = 0x2550 },
- { .start = 0xA008, .end = 0xA00C },
- { .start = 0xA188, .end = 0xA188 },
- { .start = 0xA278, .end = 0xA278 },
- { .start = 0xA540, .end = 0xA56C },
- { .start = 0xC4C8, .end = 0xC4C8 },
- { .start = 0xC4E0, .end = 0xC4E0 },
- { .start = 0xC600, .end = 0xC600 },
- { .start = 0xC658, .end = 0xC658 },
- { .start = 0x22030, .end = 0x22030 },
- { .start = 0x22510, .end = 0x22550 },
- { .start = 0x1C0030, .end = 0x1C0030 },
- { .start = 0x1C0510, .end = 0x1C0550 },
- { .start = 0x1C4030, .end = 0x1C4030 },
- { .start = 0x1C4510, .end = 0x1C4550 },
- { .start = 0x1C8030, .end = 0x1C8030 },
- { .start = 0x1C8510, .end = 0x1C8550 },
- { .start = 0x1D0030, .end = 0x1D0030 },
- { .start = 0x1D0510, .end = 0x1D0550 },
- { .start = 0x1D4030, .end = 0x1D4030 },
- { .start = 0x1D4510, .end = 0x1D4550 },
- { .start = 0x1D8030, .end = 0x1D8030 },
- { .start = 0x1D8510, .end = 0x1D8550 },
- { .start = 0x1E0030, .end = 0x1E0030 },
- { .start = 0x1E0510, .end = 0x1E0550 },
- { .start = 0x1E4030, .end = 0x1E4030 },
- { .start = 0x1E4510, .end = 0x1E4550 },
- { .start = 0x1E8030, .end = 0x1E8030 },
- { .start = 0x1E8510, .end = 0x1E8550 },
- { .start = 0x1F0030, .end = 0x1F0030 },
- { .start = 0x1F0510, .end = 0x1F0550 },
- { .start = 0x1F4030, .end = 0x1F4030 },
- { .start = 0x1F4510, .end = 0x1F4550 },
- { .start = 0x1F8030, .end = 0x1F8030 },
- { .start = 0x1F8510, .end = 0x1F8550 },
-};
-
static const struct i915_range mtl_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
@@ -1471,195 +1432,31 @@ static const struct intel_forcewake_range __gen12_fw_ranges[] = {
0x1d3f00 - 0x1d3fff: VD2 */
};
-/*
- * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
- * switching it from the GT domain to the render domain.
- */
-#define XEHP_FWRANGES(FW_RANGE_D800) \
- GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
- 0x0 - 0xaff: reserved \
- 0xb00 - 0x1fff: always on */ \
- GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
- 0x4b00 - 0x4fff: reserved \
- 0x5000 - 0x51ff: always on */ \
- GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
- 0x8160 - 0x817f: reserved \
- 0x8180 - 0x81ff: always on */ \
- GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
- 0x8500 - 0x87ff: gt \
- 0x8800 - 0x8c7f: reserved \
- 0x8c80 - 0x8cff: gt (DG2 only) */ \
- GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
- 0x8d00 - 0x8dff: render (DG2 only) \
- 0x8e00 - 0x8fff: reserved */ \
- GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
- 0x9000 - 0x947f: gt \
- 0x9480 - 0x94cf: reserved */ \
- GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
- 0x9560 - 0x95ff: always on \
- 0x9600 - 0x967f: reserved */ \
- GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
- 0x9680 - 0x96ff: render (DG2 only) \
- 0x9700 - 0x97ff: reserved */ \
- GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
- 0x9800 - 0xb4ff: gt \
- 0xb500 - 0xbfff: reserved \
- 0xc000 - 0xcfff: gt */ \
- GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
- GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
- GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
- 0xdd00 - 0xddff: gt \
- 0xde00 - 0xde7f: reserved */ \
- GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
- 0xde80 - 0xdfff: render \
- 0xe000 - 0xe0ff: reserved \
- 0xe100 - 0xe8ff: render */ \
- GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
- 0xe900 - 0xe9ff: gt \
- 0xea00 - 0xefff: reserved \
- 0xf000 - 0xffff: gt */ \
- GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
- 0x10000 - 0x11fff: reserved \
- 0x12000 - 0x127ff: always on \
- 0x12800 - 0x12fff: reserved */ \
- GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
- GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
- 0x13200 - 0x133ff: VD2 (DG2 only) \
- 0x13400 - 0x13fff: reserved */ \
- GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
- 0x15000 - 0x15fff: gt (DG2 only) \
- 0x16000 - 0x16dff: reserved */ \
- GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
- 0x20000 - 0x20fff: VD0 (XEHPSDV only) \
- 0x21000 - 0x21fff: reserved */ \
- GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
- 0x24000 - 0x2407f: always on \
- 0x24080 - 0x2417f: reserved */ \
- GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
- 0x24180 - 0x241ff: gt \
- 0x24200 - 0x249ff: reserved */ \
- GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
- 0x24a00 - 0x24a7f: render \
- 0x24a80 - 0x251ff: reserved */ \
- GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
- 0x25200 - 0x252ff: gt \
- 0x25300 - 0x25fff: reserved */ \
- GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
- 0x26000 - 0x27fff: render \
- 0x28000 - 0x29fff: reserved \
- 0x2a000 - 0x2ffff: undocumented */ \
- GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
- GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
- 0x1c0000 - 0x1c2bff: VD0 \
- 0x1c2c00 - 0x1c2cff: reserved \
- 0x1c2d00 - 0x1c2dff: VD0 \
- 0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
- 0x1c3f00 - 0x1c3fff: VD0 */ \
- GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
- 0x1c4000 - 0x1c6bff: VD1 \
- 0x1c6c00 - 0x1c6cff: reserved \
- 0x1c6d00 - 0x1c6dff: VD1 \
- 0x1c6e00 - 0x1c7fff: reserved */ \
- GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
- 0x1c8000 - 0x1ca0ff: VE0 \
- 0x1ca100 - 0x1cbfff: reserved */ \
- GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
- GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
- GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
- GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
- GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
- 0x1d0000 - 0x1d2bff: VD2 \
- 0x1d2c00 - 0x1d2cff: reserved \
- 0x1d2d00 - 0x1d2dff: VD2 \
- 0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
- 0x1d3e00 - 0x1d3eff: reserved \
- 0x1d3f00 - 0x1d3fff: VD2 */ \
- GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
- 0x1d4000 - 0x1d6bff: VD3 \
- 0x1d6c00 - 0x1d6cff: reserved \
- 0x1d6d00 - 0x1d6dff: VD3 \
- 0x1d6e00 - 0x1d7fff: reserved */ \
- GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
- 0x1d8000 - 0x1da0ff: VE1 \
- 0x1da100 - 0x1dffff: reserved */ \
- GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
- 0x1e0000 - 0x1e2bff: VD4 \
- 0x1e2c00 - 0x1e2cff: reserved \
- 0x1e2d00 - 0x1e2dff: VD4 \
- 0x1e2e00 - 0x1e3eff: reserved \
- 0x1e3f00 - 0x1e3fff: VD4 */ \
- GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
- 0x1e4000 - 0x1e6bff: VD5 \
- 0x1e6c00 - 0x1e6cff: reserved \
- 0x1e6d00 - 0x1e6dff: VD5 \
- 0x1e6e00 - 0x1e7fff: reserved */ \
- GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
- 0x1e8000 - 0x1ea0ff: VE2 \
- 0x1ea100 - 0x1effff: reserved */ \
- GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
- 0x1f0000 - 0x1f2bff: VD6 \
- 0x1f2c00 - 0x1f2cff: reserved \
- 0x1f2d00 - 0x1f2dff: VD6 \
- 0x1f2e00 - 0x1f3eff: reserved \
- 0x1f3f00 - 0x1f3fff: VD6 */ \
- GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
- 0x1f4000 - 0x1f6bff: VD7 \
- 0x1f6c00 - 0x1f6cff: reserved \
- 0x1f6d00 - 0x1f6dff: VD7 \
- 0x1f6e00 - 0x1f7fff: reserved */ \
- GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
-
-static const struct intel_forcewake_range __xehp_fw_ranges[] = {
- XEHP_FWRANGES(FORCEWAKE_GT)
-};
-
static const struct intel_forcewake_range __dg2_fw_ranges[] = {
- XEHP_FWRANGES(FORCEWAKE_RENDER)
-};
-
-static const struct intel_forcewake_range __pvc_fw_ranges[] = {
- GEN_FW_RANGE(0x0, 0xaff, 0),
- GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
- GEN_FW_RANGE(0xc00, 0xfff, 0),
- GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x0, 0x1fff, 0), /*
+ 0x0 - 0xaff: reserved
+ 0xb00 - 0x1fff: always on */
GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
- GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
- 0x4000 - 0x4aff: gt
+ GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
0x4b00 - 0x4fff: reserved
- 0x5000 - 0x51ff: gt
- 0x5200 - 0x52ff: reserved
- 0x5300 - 0x53ff: gt
- 0x5400 - 0x7fff: reserved
- 0x8000 - 0x813f: gt */
- GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x8180, 0x81ff, 0),
- GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
- 0x8200 - 0x82ff: gt
- 0x8300 - 0x84ff: reserved
- 0x8500 - 0x887f: gt
- 0x8880 - 0x8a7f: reserved
- 0x8a80 - 0x8aff: gt
- 0x8b00 - 0x8fff: reserved
+ 0x5000 - 0x51ff: always on */
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
+ 0x8160 - 0x817f: reserved
+ 0x8180 - 0x81ff: always on */
+ GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*
+ 0x8500 - 0x87ff: gt
+ 0x8800 - 0x8c7f: reserved
+ 0x8c80 - 0x8cff: gt (DG2 only) */
+ GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*
+ 0x8d00 - 0x8dff: render (DG2 only)
+ 0x8e00 - 0x8fff: reserved */
+ GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*
0x9000 - 0x947f: gt
0x9480 - 0x94cf: reserved */
GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
@@ -1673,65 +1470,114 @@ static const struct intel_forcewake_range __pvc_fw_ranges[] = {
0x9800 - 0xb4ff: gt
0xb500 - 0xbfff: reserved
0xc000 - 0xcfff: gt */
- GEN_FW_RANGE(0xd000, 0xd3ff, 0),
- GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0xd000, 0xd7ff, 0),
+ GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
0xdd00 - 0xddff: gt
0xde00 - 0xde7f: reserved */
GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
- 0xde80 - 0xdeff: render
- 0xdf00 - 0xe1ff: reserved
- 0xe200 - 0xe7ff: render
- 0xe800 - 0xe8ff: reserved */
- GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
- 0xe900 - 0xe9ff: gt
- 0xea00 - 0xebff: reserved
- 0xec00 - 0xffff: gt
- 0x10000 - 0x11fff: reserved */
- GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
+ 0xde80 - 0xdfff: render
+ 0xe000 - 0xe0ff: reserved
+ 0xe100 - 0xe8ff: render */
+ GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
+ 0xe900 - 0xe9ff: gt
+ 0xea00 - 0xefff: reserved
+ 0xf000 - 0xffff: gt */
+ GEN_FW_RANGE(0x10000, 0x12fff, 0), /*
+ 0x10000 - 0x11fff: reserved
0x12000 - 0x127ff: always on
0x12800 - 0x12fff: reserved */
- GEN_FW_RANGE(0x13000, 0x19fff, FORCEWAKE_GT), /*
- 0x13000 - 0x135ff: gt
- 0x13600 - 0x147ff: reserved
- 0x14800 - 0x153ff: gt
- 0x15400 - 0x19fff: reserved */
- GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
- 0x1a000 - 0x1ffff: render
+ GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /*
+ 0x13200 - 0x133ff: VD2 (DG2 only)
+ 0x13400 - 0x147ff: reserved */
+ GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*
+ 0x15000 - 0x15fff: gt (DG2 only)
+ 0x16000 - 0x16dff: reserved */
+ GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /*
+ 0x16e00 - 0x1ffff: render
0x20000 - 0x21fff: reserved */
GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
- 24000 - 0x2407f: always on
- 24080 - 0x2417f: reserved */
- GEN_FW_RANGE(0x24180, 0x25fff, FORCEWAKE_GT), /*
+ 0x24000 - 0x2407f: always on
+ 0x24080 - 0x2417f: reserved */
+ GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
0x24180 - 0x241ff: gt
- 0x24200 - 0x251ff: reserved
+ 0x24200 - 0x249ff: reserved */
+ GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
+ 0x24a00 - 0x24a7f: render
+ 0x24a80 - 0x251ff: reserved */
+ GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
0x25200 - 0x252ff: gt
0x25300 - 0x25fff: reserved */
GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
0x26000 - 0x27fff: render
- 0x28000 - 0x2ffff: reserved */
+ 0x28000 - 0x29fff: reserved
+ 0x2a000 - 0x2ffff: undocumented */
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
GEN_FW_RANGE(0x40000, 0x1bffff, 0),
GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
0x1c0000 - 0x1c2bff: VD0
0x1c2c00 - 0x1c2cff: reserved
0x1c2d00 - 0x1c2dff: VD0
- 0x1c2e00 - 0x1c3eff: reserved
+ 0x1c2e00 - 0x1c3eff: VD0
0x1c3f00 - 0x1c3fff: VD0 */
- GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
- 0x1c4000 - 0x1c6aff: VD1
- 0x1c6b00 - 0x1c7eff: reserved
- 0x1c7f00 - 0x1c7fff: VD1
- 0x1c8000 - 0x1cffff: reserved */
- GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
- 0x1d0000 - 0x1d2aff: VD2
- 0x1d2b00 - 0x1d3eff: reserved
- 0x1d3f00 - 0x1d3fff: VD2
- 0x1d4000 - 0x23ffff: reserved */
- GEN_FW_RANGE(0x240000, 0x3dffff, 0),
- GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
+ 0x1c4000 - 0x1c6bff: VD1
+ 0x1c6c00 - 0x1c6cff: reserved
+ 0x1c6d00 - 0x1c6dff: VD1
+ 0x1c6e00 - 0x1c7fff: reserved */
+ GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
+ 0x1c8000 - 0x1ca0ff: VE0
+ 0x1ca100 - 0x1cbfff: reserved */
+ GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
+ GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
+ GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
+ GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
+ 0x1d0000 - 0x1d2bff: VD2
+ 0x1d2c00 - 0x1d2cff: reserved
+ 0x1d2d00 - 0x1d2dff: VD2
+ 0x1d2e00 - 0x1d3dff: VD2
+ 0x1d3e00 - 0x1d3eff: reserved
+ 0x1d3f00 - 0x1d3fff: VD2 */
+ GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
+ 0x1d4000 - 0x1d6bff: VD3
+ 0x1d6c00 - 0x1d6cff: reserved
+ 0x1d6d00 - 0x1d6dff: VD3
+ 0x1d6e00 - 0x1d7fff: reserved */
+ GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
+ 0x1d8000 - 0x1da0ff: VE1
+ 0x1da100 - 0x1dffff: reserved */
+ GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
+ 0x1e0000 - 0x1e2bff: VD4
+ 0x1e2c00 - 0x1e2cff: reserved
+ 0x1e2d00 - 0x1e2dff: VD4
+ 0x1e2e00 - 0x1e3eff: reserved
+ 0x1e3f00 - 0x1e3fff: VD4 */
+ GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
+ 0x1e4000 - 0x1e6bff: VD5
+ 0x1e6c00 - 0x1e6cff: reserved
+ 0x1e6d00 - 0x1e6dff: VD5
+ 0x1e6e00 - 0x1e7fff: reserved */
+ GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
+ 0x1e8000 - 0x1ea0ff: VE2
+ 0x1ea100 - 0x1effff: reserved */
+ GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
+ 0x1f0000 - 0x1f2bff: VD6
+ 0x1f2c00 - 0x1f2cff: reserved
+ 0x1f2d00 - 0x1f2dff: VD6
+ 0x1f2e00 - 0x1f3eff: reserved
+ 0x1f3f00 - 0x1f3fff: VD6 */
+ GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
+ 0x1f4000 - 0x1f6bff: VD7
+ 0x1f6c00 - 0x1f6cff: reserved
+ 0x1f6d00 - 0x1f6dff: VD7
+ 0x1f6e00 - 0x1f7fff: reserved */
+ GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
};
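The deleted XEHP_FWRANGES() comment recorded the one difference the macro parameterized: graphics IP 12.55 moved the 0xd800-0xd87f range from the GT domain to the render domain, which the flattened DG2 table above now hardcodes as FORCEWAKE_RENDER. A sketch of how such a range table is consumed (linear scan for clarity only; the driver's real fwtable lookup machinery is assumed, not shown):

static enum forcewake_domains
fw_domain_for_reg_sketch(const struct intel_forcewake_range *table,
			 unsigned int count, u32 offset)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (offset >= table[i].start && offset <= table[i].end)
			return table[i].domains;

	return 0; /* not in any range: no forcewake required */
}

/* e.g. offset 0xd810 falls in 0xd800-0xd87f -> FORCEWAKE_RENDER on DG2 */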
static const struct intel_forcewake_range __mtl_fw_ranges[] = {
@@ -2576,18 +2422,10 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
- ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
- ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
- ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER(i915) >= 12) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
@@ -2734,7 +2572,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
* the forcewake domain if any of the other engines
* in the same media slice are present.
*/
- if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
+ if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
continue;
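Reading the bumped condition concretely for the first media slice (an illustrative gloss, inferred from the comment above):

	/*
	 * i = 0 (VCS0, even index): even if VCS0 itself is fused off, the
	 * shared forcewake domain is kept (continue) whenever the sibling
	 * VCS1 exists, since both engines in the slice use that domain.
	 * Odd indices are covered by their slice's even engine, so the
	 * pairing logic only needs to run for i % 2 == 0.
	 */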
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index ee79e0809a6d..fee76c1d2f45 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -154,6 +154,30 @@ __wait_gsc_proxy_completed(struct drm_i915_private *i915)
pr_warn(DRIVER_NAME "Timed out waiting for gsc_proxy_completion!\n");
}
+static void
+__wait_gsc_huc_load_completed(struct drm_i915_private *i915)
+{
+ /* this only applies to DG2, so we only care about GT0 */
+ struct intel_huc *huc = &to_gt(i915)->uc.huc;
+ bool need_to_wait = (IS_ENABLED(CONFIG_INTEL_MEI_PXP) &&
+ intel_huc_wait_required(huc));
+ /*
+ * The GSC and PXP mei bringup depends on the kernel boot ordering, so
+ * to account for the worst case scenario the HuC code waits for up to
+ * 10s for the GSC driver to load and then another 5s for the PXP
+ * component to bind before giving up, even though those steps normally
+ * complete in less than a second from the i915 load. We match that
+ * timeout here, but we expect to bail early due to the fence being
+ * signalled even in a failure case, as it is extremely unlikely that
+ * both components will use their full timeout.
+ */
+ unsigned long timeout_ms = 15000;
+
+ if (need_to_wait &&
+ wait_for(i915_sw_fence_done(&huc->delayed_load.fence), timeout_ms))
+ pr_warn(DRIVER_NAME "Timed out waiting for huc load via GSC!\n");
+}
+
static int __run_selftests(const char *name,
struct selftest *st,
unsigned int count,
@@ -228,14 +252,16 @@ int i915_mock_selftests(void)
int i915_live_selftests(struct pci_dev *pdev)
{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
int err;
if (!i915_selftest.live)
return 0;
- __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+ __wait_gsc_proxy_completed(i915);
+ __wait_gsc_huc_load_completed(i915);
- err = run_selftests(live, pdev_to_i915(pdev));
+ err = run_selftests(live, i915);
if (err) {
i915_selftest.live = err;
return err;
@@ -251,14 +277,16 @@ int i915_live_selftests(struct pci_dev *pdev)
int i915_perf_selftests(struct pci_dev *pdev)
{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
int err;
if (!i915_selftest.perf)
return 0;
- __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+ __wait_gsc_proxy_completed(i915);
+ __wait_gsc_huc_load_completed(i915);
- err = run_selftests(perf, pdev_to_i915(pdev));
+ err = run_selftests(perf, i915);
if (err) {
i915_selftest.perf = err;
return err;
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 4f98aa8a861e..41eaa9b7f67d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -71,7 +71,6 @@ static int intel_shadow_table_check(void)
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
{ dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
- { pvc_shadowed_regs, ARRAY_SIZE(pvc_shadowed_regs) },
{ mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) },
{ xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) },
};
@@ -119,8 +118,6 @@ int intel_uncore_mock_selftests(void)
{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
- { __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
- { __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true },
{ __mtl_fw_ranges, ARRAY_SIZE(__mtl_fw_ranges), true },
{ __xelpmp_fw_ranges, ARRAY_SIZE(__xelpmp_fw_ranges), true },
};
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 15492b69f698..e3287f1de774 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -681,6 +681,8 @@ void intel_dram_detect(struct drm_i915_private *i915)
if (ret)
return;
+ drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points);
+
drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index ffa195560d0d..68291412f4cb 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -9,7 +9,6 @@
#include "vlv_sideband.h"
#include "display/intel_dpio_phy.h"
-#include "display/intel_display_types.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 31199e45b72e..73707daa4e52 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -12,6 +12,7 @@
#include <linux/build_bug.h>
#include <linux/dcache.h>
+#include <linux/debugfs.h>
#include <linux/sysfs.h>
#include <linux/types.h>
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index b7fef3c797e6..4f99b4af871c 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -46,7 +46,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev)
if (!mips_data)
return -ENOMEM;
- for (page_nr = 0; page_nr < ARRAY_SIZE(mips_data->pt_pages); page_nr++) {
+ for (page_nr = 0; page_nr < PVR_MIPS_PT_PAGE_COUNT; page_nr++) {
mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!mips_data->pt_pages[page_nr]) {
err = -ENOMEM;
@@ -102,7 +102,7 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
int page_nr;
vunmap(mips_data->pt);
- for (page_nr = ARRAY_SIZE(mips_data->pt_pages) - 1; page_nr >= 0; page_nr--) {
+ for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
dma_unmap_page(from_pvr_device(pvr_dev)->dev,
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig
index bacf0655ebaf..5d810ac02171 100644
--- a/drivers/gpu/drm/imx/ipuv3/Kconfig
+++ b/drivers/gpu/drm/imx/ipuv3/Kconfig
@@ -35,7 +35,8 @@ config DRM_IMX_LDB
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
- select DRM_DW_HDMI
- depends on DRM_IMX && OF
+ depends on DRM_DW_HDMI
+ depends on DRM_IMX
+ depends on OF
help
Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
index dade8b59feae..704c549750f9 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
@@ -773,6 +773,13 @@ static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
.atomic_update = ipu_plane_atomic_update,
};
+static const struct drm_plane_helper_funcs ipu_primary_plane_helper_funcs = {
+ .atomic_check = ipu_plane_atomic_check,
+ .atomic_disable = ipu_plane_atomic_disable,
+ .atomic_update = ipu_plane_atomic_update,
+ .get_scanout_buffer = drm_fb_dma_get_scanout_buffer,
+};
+
bool ipu_plane_atomic_update_pending(struct drm_plane *plane)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
@@ -916,7 +923,10 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
ipu_plane->dma = dma;
ipu_plane->dp_flow = dp;
- drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(&ipu_plane->base, &ipu_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0,
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 3db117c5edd9..23effeb2ac72 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -27,8 +27,8 @@ config DRM_INGENIC_IPU
config DRM_INGENIC_DW_HDMI
tristate "Ingenic specific support for Synopsys DW HDMI"
+ depends on DRM_DW_HDMI
depends on MACH_JZ4780
- select DRM_DW_HDMI
help
Choose this option to enable Synopsys DesignWare HDMI based driver.
If you want to enable HDMI on Ingenic JZ4780 based SoC, you should
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
index fbc43f243c54..6d000504e1a4 100644
--- a/drivers/gpu/drm/lima/lima_bcast.c
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -43,6 +43,18 @@ void lima_bcast_suspend(struct lima_ip *ip)
}
+int lima_bcast_mask_irq(struct lima_ip *ip)
+{
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, 0);
+ bcast_write(LIMA_BCAST_INTERRUPT_MASK, 0);
+ return 0;
+}
+
+int lima_bcast_reset(struct lima_ip *ip)
+{
+ return lima_bcast_hw_init(ip);
+}
+
int lima_bcast_init(struct lima_ip *ip)
{
int i;
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
index 465ee587bceb..cd08841e4787 100644
--- a/drivers/gpu/drm/lima/lima_bcast.h
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -13,4 +13,7 @@ void lima_bcast_fini(struct lima_ip *ip);
void lima_bcast_enable(struct lima_device *dev, int num_pp);
+int lima_bcast_mask_irq(struct lima_ip *ip);
+int lima_bcast_reset(struct lima_ip *ip);
+
#endif
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 10fd9154cc46..739c865b556f 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -371,6 +371,7 @@ static int lima_pdev_probe(struct platform_device *pdev)
{
struct lima_device *ldev;
struct drm_device *ddev;
+ const struct lima_compatible *comp;
int err;
err = lima_sched_slab_init();
@@ -384,7 +385,13 @@ static int lima_pdev_probe(struct platform_device *pdev)
}
ldev->dev = &pdev->dev;
- ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
+ comp = of_device_get_match_data(&pdev->dev);
+ if (!comp) {
+ err = -ENODEV;
+ goto err_out0;
+ }
+
+ ldev->id = comp->id;
platform_set_drvdata(pdev, ldev);
@@ -459,9 +466,17 @@ static void lima_pdev_remove(struct platform_device *pdev)
lima_sched_slab_fini();
}
+static const struct lima_compatible lima_mali400_data = {
+ .id = lima_gpu_mali400,
+};
+
+static const struct lima_compatible lima_mali450_data = {
+ .id = lima_gpu_mali450,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 },
- { .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 },
+ { .compatible = "arm,mali-400", .data = &lima_mali400_data },
+ { .compatible = "arm,mali-450", .data = &lima_mali450_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index c738d288547b..6706c19b166e 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -7,6 +7,7 @@
#include <drm/drm_file.h>
#include "lima_ctx.h"
+#include "lima_device.h"
extern int lima_sched_timeout_ms;
extern uint lima_heap_init_nr_pages;
@@ -39,6 +40,10 @@ struct lima_submit {
struct lima_sched_task *task;
};
+struct lima_compatible {
+ enum lima_gpu_id id;
+};
+
static inline struct lima_drm_priv *
to_lima_drm_priv(struct drm_file *file)
{
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index 6b354e2fb61d..3282997a0358 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -233,6 +233,13 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static void lima_gp_task_mask_irq(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+
+ gp_write(LIMA_GP_INT_MASK, 0);
+}
+
static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
{
struct lima_ip *ip = pipe->processor[0];
@@ -338,7 +345,9 @@ int lima_gp_init(struct lima_ip *ip)
void lima_gp_fini(struct lima_ip *ip)
{
+ struct lima_device *dev = ip->dev;
+ devm_free_irq(dev->dev, ip->irq, ip);
}
int lima_gp_pipe_init(struct lima_device *dev)
@@ -365,6 +374,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
pipe->task_error = lima_gp_task_error;
pipe->task_mmu_error = lima_gp_task_mmu_error;
pipe->task_recover = lima_gp_task_recover;
+ pipe->task_mask_irq = lima_gp_task_mask_irq;
return 0;
}
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index e18317c5ca8c..6611e2836bf0 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -118,7 +118,12 @@ int lima_mmu_init(struct lima_ip *ip)
void lima_mmu_fini(struct lima_ip *ip)
{
+ struct lima_device *dev = ip->dev;
+
+ if (ip->id == lima_ip_ppmmu_bcast)
+ return;
+ devm_free_irq(dev->dev, ip->irq, ip);
}
void lima_mmu_flush_tlb(struct lima_ip *ip)
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index d0d2db0ef1ce..eaab4788dff4 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -286,7 +286,9 @@ int lima_pp_init(struct lima_ip *ip)
void lima_pp_fini(struct lima_ip *ip)
{
+ struct lima_device *dev = ip->dev;
+ devm_free_irq(dev->dev, ip->irq, ip);
}
int lima_pp_bcast_resume(struct lima_ip *ip)
@@ -319,7 +321,9 @@ int lima_pp_bcast_init(struct lima_ip *ip)
void lima_pp_bcast_fini(struct lima_ip *ip)
{
+ struct lima_device *dev = ip->dev;
+ devm_free_irq(dev->dev, ip->irq, ip);
}
static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
@@ -429,6 +433,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe)
lima_pp_hard_reset(ip);
}
+
+ if (pipe->bcast_processor)
+ lima_bcast_reset(pipe->bcast_processor);
}
static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
@@ -437,6 +444,20 @@ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static void lima_pp_task_mask_irq(struct lima_sched_pipe *pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe->num_processor; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ pp_write(LIMA_PP_INT_MASK, 0);
+ }
+
+ if (pipe->bcast_processor)
+ lima_bcast_mask_irq(pipe->bcast_processor);
+}
+
static struct kmem_cache *lima_pp_task_slab;
static int lima_pp_task_slab_refcnt;
@@ -468,6 +489,7 @@ int lima_pp_pipe_init(struct lima_device *dev)
pipe->task_fini = lima_pp_task_fini;
pipe->task_error = lima_pp_task_error;
pipe->task_mmu_error = lima_pp_task_mmu_error;
+ pipe->task_mask_irq = lima_pp_task_mask_irq;
return 0;
}
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 00b19adfc888..bbf3f8feab94 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -422,12 +422,21 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
*/
for (i = 0; i < pipe->num_processor; i++)
synchronize_irq(pipe->processor[i]->irq);
+ if (pipe->bcast_processor)
+ synchronize_irq(pipe->bcast_processor->irq);
if (dma_fence_is_signaled(task->fence)) {
DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
return DRM_GPU_SCHED_STAT_NOMINAL;
}
+ /*
+ * The task might still finish while this timeout handler runs.
+ * To prevent a race condition on its completion, mask all irqs
+ * on the running core until the next hard reset completes.
+ */
+ pipe->task_mask_irq(pipe);
+
if (!pipe->error)
DRM_ERROR("%s job timeout\n", lima_ip_name(ip));
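Condensed, the timeout path that this hunk completes now runs in the following order (a simplified sketch; error reporting and the actual recovery steps are omitted):

static enum drm_gpu_sched_stat
timedout_order_sketch(struct lima_sched_pipe *pipe,
		      struct lima_sched_task *task)
{
	int i;

	/* 1. Flush in-flight "done" IRQs before inspecting the fence. */
	for (i = 0; i < pipe->num_processor; i++)
		synchronize_irq(pipe->processor[i]->irq);
	if (pipe->bcast_processor)
		synchronize_irq(pipe->bcast_processor->irq);

	/* 2. The task really finished: only the IRQ was late. */
	if (dma_fence_is_signaled(task->fence))
		return DRM_GPU_SCHED_STAT_NOMINAL;

	/*
	 * 3. Silence the core so a completion firing mid-recovery cannot
	 *    race us; the masks stay cleared until the next hard reset.
	 */
	pipe->task_mask_irq(pipe);

	/* ... reset and recovery continue from here ... */
	return DRM_GPU_SCHED_STAT_NOMINAL;
}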
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 6bd4f3b70109..85b23ba901d5 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -80,6 +80,7 @@ struct lima_sched_pipe {
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
int (*task_recover)(struct lima_sched_pipe *pipe);
+ void (*task_mask_irq)(struct lima_sched_pipe *pipe);
struct work_struct recover_work;
};
diff --git a/drivers/gpu/drm/loongson/lsdc_crtc.c b/drivers/gpu/drm/loongson/lsdc_crtc.c
index 827acab580fa..03958b79f251 100644
--- a/drivers/gpu/drm/loongson/lsdc_crtc.c
+++ b/drivers/gpu/drm/loongson/lsdc_crtc.c
@@ -3,6 +3,7 @@
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c
index 04293df2f0de..a720d8f53209 100644
--- a/drivers/gpu/drm/loongson/lsdc_gem.c
+++ b/drivers/gpu/drm/loongson/lsdc_gem.c
@@ -19,33 +19,24 @@ static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
int ret;
- ret = lsdc_bo_reserve(lbo);
- if (unlikely(ret))
- return ret;
+ dma_resv_assert_held(obj->resv);
ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0))
lbo->sharing_count++;
- lsdc_bo_unreserve(lbo);
-
return ret;
}
static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
{
struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
- int ret;
- ret = lsdc_bo_reserve(lbo);
- if (unlikely(ret))
- return;
+ dma_resv_assert_held(obj->resv);
lsdc_bo_unpin(lbo);
if (lbo->sharing_count)
lbo->sharing_count--;
-
- lsdc_bo_unreserve(lbo);
}
static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 76cab28e010c..6caab8d4d4e0 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -22,11 +22,11 @@ config DRM_MEDIATEK
config DRM_MEDIATEK_DP
tristate "DRM DPTX Support for MediaTek SoCs"
+ depends on DRM_DISPLAY_DP_AUX_BUS
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on DRM_MEDIATEK
select PHY_MTK_DP
- select DRM_DISPLAY_HELPER
- select DRM_DISPLAY_DP_HELPER
- select DRM_DP_AUX_BUS
help
DRM/KMS Display Port driver for MediaTek SoCs.
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 5e4436403b8d..32a2ed6c0cfe 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-mediatek-drm-y := mtk_disp_aal.o \
+mediatek-drm-y := mtk_crtc.o \
+ mtk_ddp_comp.o \
+ mtk_disp_aal.o \
mtk_disp_ccorr.o \
mtk_disp_color.o \
mtk_disp_gamma.o \
@@ -8,16 +10,14 @@ mediatek-drm-y := mtk_disp_aal.o \
mtk_disp_ovl.o \
mtk_disp_ovl_adaptor.o \
mtk_disp_rdma.o \
- mtk_drm_crtc.o \
- mtk_drm_ddp_comp.o \
mtk_drm_drv.o \
- mtk_drm_gem.o \
- mtk_drm_plane.o \
mtk_dsi.o \
mtk_dpi.o \
mtk_ethdr.o \
+ mtk_gem.o \
mtk_mdp_rdma.o \
- mtk_padding.o
+ mtk_padding.o \
+ mtk_plane.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index a04499c4f9ca..6f34f573e127 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -19,14 +19,14 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
-#include "mtk_drm_gem.h"
-#include "mtk_drm_plane.h"
+#include "mtk_gem.h"
+#include "mtk_plane.h"
/*
- * struct mtk_drm_crtc - MediaTek specific crtc structure.
+ * struct mtk_crtc - MediaTek specific crtc structure.
* @base: crtc object.
* @enabled: records whether crtc_enable succeeded
* @planes: array of 4 drm_plane structures, one for each overlay plane
@@ -38,7 +38,7 @@
*
* TODO: Needs update: this header is missing a bunch of member descriptions.
*/
-struct mtk_drm_crtc {
+struct mtk_crtc {
struct drm_crtc base;
bool enabled;
@@ -80,9 +80,9 @@ struct mtk_crtc_state {
unsigned int pending_vrefresh;
};
-static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
+static inline struct mtk_crtc *to_mtk_crtc(struct drm_crtc *c)
{
- return container_of(c, struct mtk_drm_crtc, base);
+ return container_of(c, struct mtk_crtc, base);
}
static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
@@ -90,7 +90,7 @@ static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
return container_of(s, struct mtk_crtc_state, base);
}
-static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
unsigned long flags;
@@ -104,11 +104,11 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
}
}
-static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
drm_crtc_handle_vblank(&mtk_crtc->base);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
- mtk_drm_crtc_finish_page_flip(mtk_crtc);
+ mtk_crtc_finish_page_flip(mtk_crtc);
mtk_crtc->pending_needs_vblank = false;
}
}
@@ -151,9 +151,9 @@ static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
}
#endif
-static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
+static void mtk_crtc_destroy(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
int i;
mtk_mutex_put(mtk_crtc->mutex);
@@ -176,7 +176,7 @@ static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
}
-static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
+static void mtk_crtc_reset(struct drm_crtc *crtc)
{
struct mtk_crtc_state *state;
@@ -191,7 +191,7 @@ static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
-static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
+static struct drm_crtc_state *mtk_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct mtk_crtc_state *state;
@@ -208,18 +208,17 @@ static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc
return &state->base;
}
-static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+static void mtk_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(state);
kfree(to_mtk_crtc_state(state));
}
static enum drm_mode_status
-mtk_drm_crtc_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
+mtk_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
enum drm_mode_status status = MODE_OK;
int i;
@@ -231,15 +230,15 @@ mtk_drm_crtc_mode_valid(struct drm_crtc *crtc,
return status;
}
-static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool mtk_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
/* Nothing to do here, but this callback is mandatory. */
return true;
}
-static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void mtk_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
@@ -250,7 +249,7 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
state->pending_config = true;
}
-static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
+static int mtk_crtc_ddp_clk_enable(struct mtk_crtc *mtk_crtc)
{
int ret;
int i;
@@ -270,7 +269,7 @@ err:
return ret;
}
-static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
+static void mtk_crtc_ddp_clk_disable(struct mtk_crtc *mtk_crtc)
{
int i;
@@ -279,11 +278,11 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
}
static
-struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
- struct drm_plane *plane,
- unsigned int *local_layer)
+struct mtk_ddp_comp *mtk_ddp_comp_for_plane(struct drm_crtc *crtc,
+ struct drm_plane *plane,
+ unsigned int *local_layer)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp;
int i, count = 0;
unsigned int local_index = plane - mtk_crtc->planes;
@@ -306,7 +305,7 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
struct cmdq_cb_data *data = mssg;
struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
- struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+ struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client);
struct mtk_crtc_state *state;
unsigned int i;
@@ -346,7 +345,7 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
}
#endif
-static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+static int mtk_crtc_ddp_hw_init(struct mtk_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
struct drm_connector *connector;
@@ -431,7 +430,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
/* should not enable layer before crtc enabled */
plane_state->pending.enable = false;
- comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state, NULL);
@@ -446,7 +445,7 @@ err_pm_runtime_put:
return ret;
}
-static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
struct drm_crtc *crtc = &mtk_crtc->base;
@@ -491,7 +490,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
struct cmdq_pkt *cmdq_handle)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
@@ -522,8 +521,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
if (!plane_state->pending.config)
continue;
- comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
- &local_layer);
+ comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
@@ -547,8 +545,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
if (!plane_state->pending.async_config)
continue;
- comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
- &local_layer);
+ comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
@@ -563,8 +560,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
}
}
-static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
- bool needs_vblank)
+static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
@@ -636,7 +632,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
static void mtk_crtc_ddp_irq(void *data)
{
struct drm_crtc *crtc = data;
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
@@ -652,9 +648,9 @@ static void mtk_crtc_ddp_irq(void *data)
mtk_drm_finish_page_flip(mtk_crtc);
}
-static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
+static int mtk_crtc_enable_vblank(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
mtk_ddp_comp_enable_vblank(comp);
@@ -662,22 +658,22 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
return 0;
}
-static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
+static void mtk_crtc_disable_vblank(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
mtk_ddp_comp_disable_vblank(comp);
}
-static void mtk_drm_crtc_update_output(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_update_output(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
int crtc_index = drm_crtc_index(crtc);
int i;
struct device *dev;
struct drm_crtc_state *crtc_state = state->crtcs[crtc_index].new_state;
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv;
unsigned int encoder_mask = crtc_state->encoder_mask;
@@ -707,33 +703,33 @@ static void mtk_drm_crtc_update_output(struct drm_crtc *crtc,
}
}
-int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
- struct mtk_plane_state *state)
+int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state)
{
unsigned int local_layer;
struct mtk_ddp_comp *comp;
- comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
return mtk_ddp_comp_layer_check(comp, local_layer, state);
return 0;
}
-void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
- struct drm_atomic_state *state)
+void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
if (!mtk_crtc->enabled)
return;
- mtk_drm_crtc_update_config(mtk_crtc, false);
+ mtk_crtc_update_config(mtk_crtc, false);
}
-static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int ret;
@@ -745,7 +741,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
- mtk_drm_crtc_update_output(crtc, state);
+ mtk_crtc_update_output(crtc, state);
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
@@ -757,10 +753,10 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
mtk_crtc->enabled = true;
}
-static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int i;
@@ -779,7 +775,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
mtk_crtc->pending_planes = true;
- mtk_drm_crtc_update_config(mtk_crtc, false);
+ mtk_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
/* Wait for planes to be disabled by cmdq */
if (mtk_crtc->cmdq_client.chan)
@@ -797,13 +793,13 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
mtk_crtc->enabled = false;
}
-static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
unsigned long flags;
if (mtk_crtc->event && mtk_crtc_state->base.event)
@@ -821,10 +817,10 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
}
}
-static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void mtk_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
int i;
if (crtc->state->color_mgmt_changed)
@@ -832,33 +828,32 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
}
- mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
+ mtk_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .destroy = mtk_drm_crtc_destroy,
- .reset = mtk_drm_crtc_reset,
- .atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
- .atomic_destroy_state = mtk_drm_crtc_destroy_state,
- .enable_vblank = mtk_drm_crtc_enable_vblank,
- .disable_vblank = mtk_drm_crtc_disable_vblank,
+ .destroy = mtk_crtc_destroy,
+ .reset = mtk_crtc_reset,
+ .atomic_duplicate_state = mtk_crtc_duplicate_state,
+ .atomic_destroy_state = mtk_crtc_destroy_state,
+ .enable_vblank = mtk_crtc_enable_vblank,
+ .disable_vblank = mtk_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
- .mode_fixup = mtk_drm_crtc_mode_fixup,
- .mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
- .mode_valid = mtk_drm_crtc_mode_valid,
- .atomic_begin = mtk_drm_crtc_atomic_begin,
- .atomic_flush = mtk_drm_crtc_atomic_flush,
- .atomic_enable = mtk_drm_crtc_atomic_enable,
- .atomic_disable = mtk_drm_crtc_atomic_disable,
+ .mode_fixup = mtk_crtc_mode_fixup,
+ .mode_set_nofb = mtk_crtc_mode_set_nofb,
+ .mode_valid = mtk_crtc_mode_valid,
+ .atomic_begin = mtk_crtc_atomic_begin,
+ .atomic_flush = mtk_crtc_atomic_flush,
+ .atomic_enable = mtk_crtc_atomic_enable,
+ .atomic_disable = mtk_crtc_atomic_disable,
};
-static int mtk_drm_crtc_init(struct drm_device *drm,
- struct mtk_drm_crtc *mtk_crtc,
- unsigned int pipe)
+static int mtk_crtc_init(struct drm_device *drm, struct mtk_crtc *mtk_crtc,
+ unsigned int pipe)
{
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
@@ -885,8 +880,7 @@ err_cleanup_crtc:
return ret;
}
-static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
- int comp_idx)
+static int mtk_crtc_num_comp_planes(struct mtk_crtc *mtk_crtc, int comp_idx)
{
struct mtk_ddp_comp *comp;
@@ -904,8 +898,8 @@ static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
}
static inline
-enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
- unsigned int num_planes)
+enum drm_plane_type mtk_crtc_plane_type(unsigned int plane_idx,
+ unsigned int num_planes)
{
if (plane_idx == 0)
return DRM_PLANE_TYPE_PRIMARY;
@@ -916,11 +910,11 @@ enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
}
-static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
- struct mtk_drm_crtc *mtk_crtc,
- int comp_idx, int pipe)
+static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
+ struct mtk_crtc *mtk_crtc,
+ int comp_idx, int pipe)
{
- int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
+ int num_planes = mtk_crtc_num_comp_planes(mtk_crtc, comp_idx);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
int i, ret;
@@ -928,8 +922,7 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
ret = mtk_plane_init(drm_dev,
&mtk_crtc->planes[mtk_crtc->layer_nr],
BIT(pipe),
- mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
- num_planes),
+ mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_formats(comp),
mtk_ddp_comp_get_num_formats(comp));
@@ -941,9 +934,9 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
return 0;
}
-struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
+struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = NULL;
+ struct mtk_crtc *mtk_crtc = NULL;
if (!crtc)
return NULL;
@@ -955,14 +948,14 @@ struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
return mtk_crtc->dma_dev;
}
-int mtk_drm_crtc_create(struct drm_device *drm_dev,
- const unsigned int *path, unsigned int path_len,
- int priv_data_index, const struct mtk_drm_route *conn_routes,
- unsigned int num_conn_routes)
+int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
+ unsigned int path_len, int priv_data_index,
+ const struct mtk_drm_route *conn_routes,
+ unsigned int num_conn_routes)
{
struct mtk_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
- struct mtk_drm_crtc *mtk_crtc;
+ struct mtk_crtc *mtk_crtc;
unsigned int num_comp_planes = 0;
int ret;
int i;
@@ -1009,10 +1002,10 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->mmsys_dev = priv->mmsys_dev;
mtk_crtc->ddp_comp_nr = path_len;
- mtk_crtc->ddp_comp = devm_kmalloc_array(dev,
- mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
- sizeof(*mtk_crtc->ddp_comp),
- GFP_KERNEL);
+ mtk_crtc->ddp_comp = devm_kcalloc(dev,
+ mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
+ sizeof(*mtk_crtc->ddp_comp),
+ GFP_KERNEL);
if (!mtk_crtc->ddp_comp)
return -ENOMEM;
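
Note: the allocator swap above is not just a rename. devm_kcalloc() both zero-initializes the array and checks the n * size multiplication for overflow. A minimal sketch of the relationship, assuming the inline definitions in include/linux/device.h:

    /* devm_kcalloc() is devm_kmalloc_array() plus __GFP_ZERO, so the two
     * calls below are equivalent; the first is the idiomatic spelling.
     */
    ptr = devm_kcalloc(dev, n, sizeof(*ptr), GFP_KERNEL);
    ptr = devm_kmalloc_array(dev, n, sizeof(*ptr), GFP_KERNEL | __GFP_ZERO);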
@@ -1047,7 +1040,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
- num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);
+ num_comp_planes += mtk_crtc_num_comp_planes(mtk_crtc, i);
mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
sizeof(struct drm_plane), GFP_KERNEL);
@@ -1055,8 +1048,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
return -ENOMEM;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
- ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
- crtc_i);
+ ret = mtk_crtc_init_comp_planes(drm_dev, mtk_crtc, i, crtc_i);
if (ret)
return ret;
}
@@ -1068,7 +1060,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
*/
mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]);
- ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, crtc_i);
+ ret = mtk_crtc_init(drm_dev, mtk_crtc, crtc_i);
if (ret < 0)
return ret;
@@ -1138,7 +1130,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->num_conn_routes = num_conn_routes;
mtk_crtc->conn_routes = conn_routes;
- /* increase ddp_comp_nr at the end of mtk_drm_crtc_create */
+ /* increase ddp_comp_nr at the end of mtk_crtc_create */
mtk_crtc->ddp_comp_nr++;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h
new file mode 100644
index 000000000000..388e900b6f4d
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#ifndef MTK_CRTC_H
+#define MTK_CRTC_H
+
+#include <drm/drm_crtc.h>
+#include "mtk_ddp_comp.h"
+#include "mtk_drm_drv.h"
+#include "mtk_plane.h"
+
+#define MTK_MAX_BPC 10
+#define MTK_MIN_BPC 3
+
+void mtk_crtc_commit(struct drm_crtc *crtc);
+int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
+ unsigned int path_len, int priv_data_index,
+ const struct mtk_drm_route *conn_routes,
+ unsigned int num_conn_routes);
+int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state);
+void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_atomic_state *plane_state);
+struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc);
+
+#endif /* MTK_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
index a515e96cfefc..17b036411292 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
@@ -14,11 +14,11 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <drm/drm_print.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_plane.h"
-#include "mtk_drm_ddp_comp.h"
-#include "mtk_drm_crtc.h"
+#include "mtk_plane.h"
#define DISP_REG_DITHER_EN 0x0000
@@ -497,10 +497,10 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_DRM_ID_MAX]
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
-static bool mtk_drm_find_comp_in_ddp(struct device *dev,
- const unsigned int *path,
- unsigned int path_len,
- struct mtk_ddp_comp *ddp_comp)
+static bool mtk_ddp_comp_find(struct device *dev,
+ const unsigned int *path,
+ unsigned int path_len,
+ struct mtk_ddp_comp *ddp_comp)
{
unsigned int i;
@@ -514,10 +514,10 @@ static bool mtk_drm_find_comp_in_ddp(struct device *dev,
return false;
}
-static unsigned int mtk_drm_find_comp_in_ddp_conn_path(struct device *dev,
- const struct mtk_drm_route *routes,
- unsigned int num_routes,
- struct mtk_ddp_comp *ddp_comp)
+static unsigned int mtk_ddp_comp_find_in_route(struct device *dev,
+ const struct mtk_drm_route *routes,
+ unsigned int num_routes,
+ struct mtk_ddp_comp *ddp_comp)
{
int ret;
unsigned int i;
@@ -554,26 +554,31 @@ int mtk_ddp_comp_get_id(struct device_node *node,
return -EINVAL;
}
-unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
- struct device *dev)
+unsigned int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev)
{
struct mtk_drm_private *private = drm->dev_private;
unsigned int ret = 0;
- if (mtk_drm_find_comp_in_ddp(dev, private->data->main_path, private->data->main_len,
- private->ddp_comp))
+ if (mtk_ddp_comp_find(dev,
+ private->data->main_path,
+ private->data->main_len,
+ private->ddp_comp))
ret = BIT(0);
- else if (mtk_drm_find_comp_in_ddp(dev, private->data->ext_path,
- private->data->ext_len, private->ddp_comp))
+ else if (mtk_ddp_comp_find(dev,
+ private->data->ext_path,
+ private->data->ext_len,
+ private->ddp_comp))
ret = BIT(1);
- else if (mtk_drm_find_comp_in_ddp(dev, private->data->third_path,
- private->data->third_len, private->ddp_comp))
+ else if (mtk_ddp_comp_find(dev,
+ private->data->third_path,
+ private->data->third_len,
+ private->ddp_comp))
ret = BIT(2);
else
- ret = mtk_drm_find_comp_in_ddp_conn_path(dev,
- private->data->conn_routes,
- private->data->num_conn_routes,
- private->ddp_comp);
+ ret = mtk_ddp_comp_find_in_route(dev,
+ private->data->conn_routes,
+ private->data->num_conn_routes,
+ private->ddp_comp);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
index 93d79a1366e9..26236691ce4c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
@@ -3,8 +3,8 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#ifndef MTK_DRM_DDP_COMP_H
-#define MTK_DRM_DDP_COMP_H
+#ifndef MTK_DDP_COMP_H
+#define MTK_DDP_COMP_H
#include <linux/io.h>
#include <linux/pm_runtime.h>
@@ -326,8 +326,7 @@ static inline void mtk_ddp_comp_encoder_index_set(struct mtk_ddp_comp *comp)
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
-unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
- struct device *dev);
+unsigned int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev);
int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp,
unsigned int comp_id);
enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id);
@@ -340,4 +339,4 @@ void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
unsigned int offset, unsigned int mask);
-#endif /* MTK_DRM_DDP_COMP_H */
+#endif /* MTK_DDP_COMP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
index 40fe403086c3..3ce8f32b06d5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -11,9 +11,9 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_AAL_EN 0x0000
@@ -223,7 +223,6 @@ struct platform_driver mtk_disp_aal_driver = {
.remove_new = mtk_disp_aal_remove,
.driver = {
.name = "mediatek-disp-aal",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_aal_driver_dt_match,
},
};
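
Note: the repeated '.owner = THIS_MODULE' deletions in this series are safe because platform_driver_register() already passes THIS_MODULE to the driver core, which fills in the field. A sketch of the mechanism, assuming the macro in include/linux/platform_device.h:

    /* The register call is a macro, so every platform driver gets its
     * owner set by the core; an explicit assignment in the struct is
     * redundant.
     */
    #define platform_driver_register(drv) \
    	__platform_driver_register(drv, THIS_MODULE)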
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 465cddce0d32..df35e90dd25f 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -10,9 +10,9 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_CCORR_EN 0x0000
@@ -214,7 +214,6 @@ struct platform_driver mtk_disp_ccorr_driver = {
.remove_new = mtk_disp_ccorr_remove,
.driver = {
.name = "mediatek-disp-ccorr",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_ccorr_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 78ea99f1444f..7f0085be5671 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -10,9 +10,9 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_COLOR_CFG_MAIN 0x0400
@@ -164,7 +164,6 @@ struct platform_driver mtk_disp_color_driver = {
.remove_new = mtk_disp_color_remove,
.driver = {
.name = "mediatek-disp-color",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_color_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 90e64467ea8f..082ac18fe04a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -9,8 +9,8 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
-#include "mtk_drm_plane.h"
#include "mtk_mdp_rdma.h"
+#include "mtk_plane.h"
int mtk_aal_clk_enable(struct device *dev);
void mtk_aal_clk_disable(struct device *dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index c1bc8b00d938..ca8d1f3aca03 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -11,9 +11,9 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_GAMMA_EN 0x0000
@@ -334,7 +334,6 @@ struct platform_driver mtk_disp_gamma_driver = {
.remove_new = mtk_disp_gamma_remove,
.driver = {
.name = "mediatek-disp-gamma",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_gamma_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index 32a29924bd54..77c057e0e671 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -10,7 +10,7 @@
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_disp_drv.h"
@@ -376,7 +376,6 @@ struct platform_driver mtk_disp_merge_driver = {
.remove_new = mtk_disp_merge_remove,
.driver = {
.name = "mediatek-disp-merge",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_merge_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 2bffe4245466..b552a02d7eae 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -15,9 +15,9 @@
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_REG_OVL_INTEN 0x0004
@@ -659,7 +659,6 @@ struct platform_driver mtk_disp_ovl_driver = {
.remove_new = mtk_disp_ovl_remove,
.driver = {
.name = "mediatek-disp-ovl",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_ovl_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 034d31824d4d..02dd7dcdfedb 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -17,9 +17,9 @@
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_ethdr.h"
@@ -629,6 +629,5 @@ struct platform_driver mtk_disp_ovl_adaptor_driver = {
.remove_new = mtk_disp_ovl_adaptor_remove,
.driver = {
.name = "mediatek-disp-ovl-adaptor",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index faa907f2f443..7b1a6e631200 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -13,9 +13,9 @@
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DISP_REG_RDMA_INT_ENABLE 0x0000
@@ -428,7 +428,6 @@ struct platform_driver mtk_disp_rdma_driver = {
.remove_new = mtk_disp_rdma_remove,
.driver = {
.name = "mediatek-disp-rdma",
- .owner = THIS_MODULE,
.of_match_table = mtk_disp_rdma_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 0ba72102636a..536366956447 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2104,7 +2104,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP &&
!mtk_dp->train_info.cable_plugged_in) {
- ret = -EAGAIN;
+ ret = -EIO;
goto err;
}
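
Note: returning -EAGAIN from an AUX transfer tells the DRM DP helpers the transaction is worth retrying, so an unplugged cable could keep callers spinning in their retry loops; -EIO reports a hard failure instead. A hedged sketch of the caller-side pattern this avoids (the retry count is illustrative, not the exact helper code):

    /* Simplified retry loop in the spirit of the DP AUX helpers:
     * -EAGAIN is retried, so a permanently unplugged cable must
     * return a hard error such as -EIO to break out.
     */
    for (retry = 0; retry < 32; retry++) {
    	ret = aux->transfer(aux, &msg);
    	if (ret == -EAGAIN)
    		continue;	/* transient: try again */
    	break;			/* success or hard failure */
    }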
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index beb7d9d08e97..bfe8653005db 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -26,9 +26,9 @@
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
#include "mtk_dpi_regs.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
enum mtk_dpi_out_bit_num {
@@ -805,7 +805,7 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->dev);
+ dpi->encoder.possible_crtcs = mtk_find_possible_crtcs(drm_dev, dpi->dev);
ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
deleted file mode 100644
index 1f988ff1bf9f..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#ifndef MTK_DRM_CRTC_H
-#define MTK_DRM_CRTC_H
-
-#include <drm/drm_crtc.h>
-#include "mtk_drm_ddp_comp.h"
-#include "mtk_drm_drv.h"
-#include "mtk_drm_plane.h"
-
-#define MTK_MAX_BPC 10
-#define MTK_MIN_BPC 3
-
-void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-int mtk_drm_crtc_create(struct drm_device *drm_dev,
- const unsigned int *path,
- unsigned int path_len,
- int priv_data_index,
- const struct mtk_drm_route *conn_routes,
- unsigned int num_conn_routes);
-int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
- struct mtk_plane_state *state);
-void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
- struct drm_atomic_state *plane_state);
-struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc);
-
-#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 74832c213092..b5f605751b0a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -24,10 +24,10 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_gem.h"
+#include "mtk_gem.h"
#define DRIVER_NAME "mediatek"
#define DRIVER_DESC "Mediatek SoC DRM"
@@ -494,24 +494,24 @@ static int mtk_drm_kms_init(struct drm_device *drm)
priv_n = private->all_drm_private[j];
if (i == CRTC_MAIN && priv_n->data->main_len) {
- ret = mtk_drm_crtc_create(drm, priv_n->data->main_path,
- priv_n->data->main_len, j,
- priv_n->data->conn_routes,
- priv_n->data->num_conn_routes);
+ ret = mtk_crtc_create(drm, priv_n->data->main_path,
+ priv_n->data->main_len, j,
+ priv_n->data->conn_routes,
+ priv_n->data->num_conn_routes);
if (ret)
goto err_component_unbind;
continue;
} else if (i == CRTC_EXT && priv_n->data->ext_len) {
- ret = mtk_drm_crtc_create(drm, priv_n->data->ext_path,
- priv_n->data->ext_len, j, NULL, 0);
+ ret = mtk_crtc_create(drm, priv_n->data->ext_path,
+ priv_n->data->ext_len, j, NULL, 0);
if (ret)
goto err_component_unbind;
continue;
} else if (i == CRTC_THIRD && priv_n->data->third_len) {
- ret = mtk_drm_crtc_create(drm, priv_n->data->third_path,
- priv_n->data->third_len, j, NULL, 0);
+ ret = mtk_crtc_create(drm, priv_n->data->third_path,
+ priv_n->data->third_len, j, NULL, 0);
if (ret)
goto err_component_unbind;
@@ -523,7 +523,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
/* Use OVL device for all DMA memory allocations */
crtc = drm_crtc_from_index(drm, 0);
if (crtc)
- dma_dev = mtk_drm_crtc_dma_dev_get(crtc);
+ dma_dev = mtk_crtc_dma_dev_get(crtc);
if (!dma_dev) {
ret = -ENODEV;
dev_err(drm->dev, "Need at least one OVL device\n");
@@ -576,8 +576,8 @@ DEFINE_DRM_GEM_FOPS(mtk_drm_fops);
* We need to override this because the device used to import the memory is
* not dev->dev, as drm_gem_prime_import() expects.
*/
-static struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+static struct drm_gem_object *mtk_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
{
struct mtk_drm_private *private = dev->dev_private;
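
Note: the override exists because mtk allocates through a dedicated DMA device rather than drm->dev, and drm_gem_prime_import_dev() accepts that attach device explicitly. A generic illustration of importing against a non-default device (mtk_drm_dma_dev_get() here is a hypothetical helper name, not the exact code elided from this hunk):

    /* Import the dma-buf with the DMA-capable device attached,
     * instead of the drm_device's own struct device.
     */
    return drm_gem_prime_import_dev(dev, dma_buf, mtk_drm_dma_dev_get(dev));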
@@ -587,9 +587,9 @@ static struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
static const struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .dumb_create = mtk_drm_gem_dumb_create,
+ .dumb_create = mtk_gem_dumb_create,
- .gem_prime_import = mtk_drm_gem_prime_import,
+ .gem_prime_import = mtk_gem_prime_import,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.fops = &mtk_drm_fops,
@@ -709,6 +709,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8183-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8195-disp-gamma",
+ .data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8195-disp-merge",
.data = (void *)MTK_DISP_MERGE },
{ .compatible = "mediatek,mt2701-disp-mutex",
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 33fadb08dc1c..78d698ede1bf 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -7,13 +7,13 @@
#define MTK_DRM_DRV_H
#include <linux/io.h>
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_ddp_comp.h"
#define MAX_CONNECTOR 2
#define DDP_COMPONENT_DRM_OVL_ADAPTOR (DDP_COMPONENT_ID_MAX + 1)
#define DDP_COMPONENT_DRM_ID_MAX (DDP_COMPONENT_DRM_OVL_ADAPTOR + 1)
-enum mtk_drm_crtc_path {
+enum mtk_crtc_path {
CRTC_MAIN,
CRTC_EXT,
CRTC_THIRD,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 9501f4019199..c255559cc56e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -28,8 +28,8 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#define DSI_START 0x00
@@ -242,22 +242,23 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
struct mtk_phy_timing *timing = &dsi->phy_timing;
- timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
- timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
- timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+ timing->lpx = (80 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (59 * data_rate_mhz + 4 * 1000) / 8000 + 1;
+ timing->da_hs_zero = (163 * data_rate_mhz + 11 * 1000) / 8000 + 1 -
timing->da_hs_prepare;
- timing->da_hs_trail = timing->da_hs_prepare + 1;
+ timing->da_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
- timing->ta_go = 4 * timing->lpx - 2;
- timing->ta_sure = timing->lpx + 2;
- timing->ta_get = 4 * timing->lpx;
- timing->da_hs_exit = 2 * timing->lpx + 1;
+ timing->ta_go = 4 * timing->lpx;
+ timing->ta_sure = 3 * timing->lpx / 2;
+ timing->ta_get = 5 * timing->lpx;
+ timing->da_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
- timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
- timing->clk_hs_post = timing->clk_hs_prepare + 8;
- timing->clk_hs_trail = timing->clk_hs_prepare;
- timing->clk_hs_zero = timing->clk_hs_trail * 4;
- timing->clk_hs_exit = 2 * timing->clk_hs_trail;
+ timing->clk_hs_prepare = (57 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->clk_hs_post = (65 * data_rate_mhz + 53 * 1000) / 8000 + 1;
+ timing->clk_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
+ timing->clk_hs_zero = (330 * data_rate_mhz / (8 * 1000)) + 1 -
+ timing->clk_hs_prepare;
+ timing->clk_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
timcon0 = FIELD_PREP(LPX, timing->lpx) |
FIELD_PREP(HS_PREP, timing->da_hs_prepare) |
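
Note: the rewritten PHY timings convert nanosecond targets into byte-clock cycles. One byte clock is 8 UI, so ns * data_rate_mhz / 8000 gives cycles, and the trailing +1 rounds up for margin. A worked example at an assumed 500 Mbps-per-lane rate:

    /* data_rate_mhz = 500 (assumed for illustration)
     * lpx        = (80 * 500 / 8000) + 1         = 6 byte clocks
     * hs_prepare = (59 * 500 + 4000) / 8000 + 1  = 5 byte clocks
     * hs_trail   = (78 * 500 + 7000) / 8000 + 1  = 6 byte clocks
     * i.e. roughly 96 ns of LPX at this rate, above the D-PHY minimum.
     */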
@@ -662,7 +663,7 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
/*
* mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+ * mtk_dsi_stop() should be called after mtk_crtc_atomic_disable(),
* which needs irq for vblank, and mtk_dsi_stop() will disable irq.
* mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
* after dsi is fully set.
@@ -836,7 +837,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
return ret;
}
- dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
+ dsi->encoder.possible_crtcs = mtk_find_possible_crtcs(drm, dsi->host.dev);
ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index 6a5d0c345aab..156c6ff547e8 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -14,8 +14,8 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_ethdr.h"
@@ -363,7 +363,6 @@ struct platform_driver mtk_ethdr_driver = {
.remove_new = mtk_ethdr_remove,
.driver = {
.name = "mediatek-disp-ethdr",
- .owner = THIS_MODULE,
.of_match_table = mtk_ethdr_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c
index 4f2e3feabc0f..5a82d7cf3ed0 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_gem.c
@@ -12,37 +12,40 @@
#include <drm/drm_prime.h>
#include "mtk_drm_drv.h"
-#include "mtk_drm_gem.h"
+#include "mtk_gem.h"
-static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
static const struct vm_operations_struct vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
-static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
- .free = mtk_drm_gem_free_object,
+static const struct drm_gem_object_funcs mtk_gem_object_funcs = {
+ .free = mtk_gem_free_object,
.get_sg_table = mtk_gem_prime_get_sg_table,
- .vmap = mtk_drm_gem_prime_vmap,
- .vunmap = mtk_drm_gem_prime_vunmap,
- .mmap = mtk_drm_gem_object_mmap,
+ .vmap = mtk_gem_prime_vmap,
+ .vunmap = mtk_gem_prime_vunmap,
+ .mmap = mtk_gem_object_mmap,
.vm_ops = &vm_ops,
};
-static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
- unsigned long size)
+static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev,
+ unsigned long size)
{
- struct mtk_drm_gem_obj *mtk_gem_obj;
+ struct mtk_gem_obj *mtk_gem_obj;
int ret;
size = round_up(size, PAGE_SIZE);
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
if (!mtk_gem_obj)
return ERR_PTR(-ENOMEM);
- mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;
+ mtk_gem_obj->base.funcs = &mtk_gem_object_funcs;
ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
if (ret < 0) {
@@ -54,15 +57,15 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
return mtk_gem_obj;
}
-struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
- size_t size, bool alloc_kmap)
+struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
+ size_t size, bool alloc_kmap)
{
struct mtk_drm_private *priv = dev->dev_private;
- struct mtk_drm_gem_obj *mtk_gem;
+ struct mtk_gem_obj *mtk_gem;
struct drm_gem_object *obj;
int ret;
- mtk_gem = mtk_drm_gem_init(dev, size);
+ mtk_gem = mtk_gem_init(dev, size);
if (IS_ERR(mtk_gem))
return ERR_CAST(mtk_gem);
@@ -97,9 +100,9 @@ err_gem_free:
return ERR_PTR(ret);
}
-void mtk_drm_gem_free_object(struct drm_gem_object *obj)
+void mtk_gem_free_object(struct drm_gem_object *obj)
{
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
if (mtk_gem->sg)
@@ -114,10 +117,10 @@ void mtk_drm_gem_free_object(struct drm_gem_object *obj)
kfree(mtk_gem);
}
-int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
+int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
{
- struct mtk_drm_gem_obj *mtk_gem;
+ struct mtk_gem_obj *mtk_gem;
int ret;
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
@@ -130,7 +133,7 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
args->size = args->pitch;
args->size *= args->height;
- mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+ mtk_gem = mtk_gem_create(dev, args->size, false);
if (IS_ERR(mtk_gem))
return PTR_ERR(mtk_gem);
@@ -148,16 +151,16 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
return 0;
err_handle_create:
- mtk_drm_gem_free_object(&mtk_gem->base);
+ mtk_gem_free_object(&mtk_gem->base);
return ret;
}
-static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static int mtk_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
int ret;
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
/*
@@ -188,7 +191,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
*/
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct mtk_drm_private *priv = obj->dev->dev_private;
struct sg_table *sgt;
int ret;
@@ -212,7 +215,7 @@ struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
- struct mtk_drm_gem_obj *mtk_gem;
+ struct mtk_gem_obj *mtk_gem;
/* check if the entries in the sg_table are contiguous */
if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
@@ -220,7 +223,7 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
+ mtk_gem = mtk_gem_init(dev, attach->dmabuf->size);
if (IS_ERR(mtk_gem))
return ERR_CAST(mtk_gem);
@@ -230,9 +233,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
return &mtk_gem->base;
}
-int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct sg_table *sgt = NULL;
unsigned int npages;
@@ -270,10 +273,9 @@ out:
return 0;
}
-void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
- struct iosys_map *map)
+void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
void *vaddr = map->vaddr;
if (!mtk_gem->pages)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_gem.h
index 78f23b07a02e..66e5f154f698 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_gem.h
@@ -3,8 +3,8 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#ifndef _MTK_DRM_GEM_H_
-#define _MTK_DRM_GEM_H_
+#ifndef _MTK_GEM_H_
+#define _MTK_GEM_H_
#include <drm/drm_gem.h>
@@ -22,7 +22,7 @@
* P.S. this object is exposed to userspace as kms_bo.handle, so
* userspace can access the buffer through kms_bo.handle.
*/
-struct mtk_drm_gem_obj {
+struct mtk_gem_obj {
struct drm_gem_object base;
void *cookie;
void *kvaddr;
@@ -32,18 +32,17 @@ struct mtk_drm_gem_obj {
struct page **pages;
};
-#define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base)
+#define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base)
-void mtk_drm_gem_free_object(struct drm_gem_object *gem);
-struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, size_t size,
- bool alloc_kmap);
-int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
+void mtk_gem_free_object(struct drm_gem_object *gem);
+struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size,
+ bool alloc_kmap);
+int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
-int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
- struct iosys_map *map);
+int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index c6bdc565e4a9..6e1cca97a654 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1695,7 +1695,7 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
return 0;
}
-static int mtk_drm_hdmi_probe(struct platform_device *pdev)
+static int mtk_hdmi_probe(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi;
struct device *dev = &pdev->dev;
@@ -1754,7 +1754,7 @@ err_bridge_remove:
return ret;
}
-static void mtk_drm_hdmi_remove(struct platform_device *pdev)
+static void mtk_hdmi_remove(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
@@ -1798,7 +1798,7 @@ static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = {
.cea_modes_only = true,
};
-static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
+static const struct of_device_id mtk_hdmi_of_ids[] = {
{ .compatible = "mediatek,mt2701-hdmi",
.data = &mtk_hdmi_conf_mt2701,
},
@@ -1809,14 +1809,14 @@ static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
},
{}
};
-MODULE_DEVICE_TABLE(of, mtk_drm_hdmi_of_ids);
+MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids);
static struct platform_driver mtk_hdmi_driver = {
- .probe = mtk_drm_hdmi_probe,
- .remove_new = mtk_drm_hdmi_remove,
+ .probe = mtk_hdmi_probe,
+ .remove_new = mtk_hdmi_remove,
.driver = {
.name = "mediatek-drm-hdmi",
- .of_match_table = mtk_drm_hdmi_of_ids,
+ .of_match_table = mtk_hdmi_of_ids,
.pm = &mtk_hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 54e46e440e0f..52d55861f954 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -284,8 +284,7 @@ static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
return PTR_ERR(ddc->clk);
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddc->regs = devm_ioremap_resource(&pdev->dev, mem);
+ ddc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(ddc->regs))
return PTR_ERR(ddc->regs);
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index ee9ce9b6d078..925cbb7471ec 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -346,7 +346,6 @@ struct platform_driver mtk_mdp_rdma_driver = {
.remove_new = mtk_mdp_rdma_remove,
.driver = {
.name = "mediatek-mdp-rdma",
- .owner = THIS_MODULE,
.of_match_table = mtk_mdp_rdma_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_padding.c b/drivers/gpu/drm/mediatek/mtk_padding.c
index 0d6451c149b6..85bc6768b6bc 100644
--- a/drivers/gpu/drm/mediatek/mtk_padding.c
+++ b/drivers/gpu/drm/mediatek/mtk_padding.c
@@ -11,9 +11,9 @@
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
#define PADDING_CONTROL_REG 0x00
#define PADDING_BYPASS BIT(0)
@@ -154,7 +154,6 @@ struct platform_driver mtk_padding_driver = {
.remove = mtk_padding_remove,
.driver = {
.name = "mediatek-disp-padding",
- .owner = THIS_MODULE,
.of_match_table = mtk_padding_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index ddc9355b06d5..4625deb21d40 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -13,11 +13,11 @@
#include <drm/drm_gem_atomic_helper.h>
#include <linux/align.h>
-#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp_comp.h"
+#include "mtk_crtc.h"
+#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_gem.h"
-#include "mtk_drm_plane.h"
+#include "mtk_gem.h"
+#include "mtk_plane.h"
static const u64 modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
@@ -93,8 +93,8 @@ static bool mtk_plane_format_mod_supported(struct drm_plane *plane,
return true;
}
-static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
+static void mtk_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(state);
kfree(to_mtk_plane_state(state));
@@ -117,8 +117,8 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
- ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane,
- to_mtk_plane_state(new_plane_state));
+ ret = mtk_crtc_plane_check(new_plane_state->crtc, plane,
+ to_mtk_plane_state(new_plane_state));
if (ret)
return ret;
@@ -135,7 +135,7 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
{
struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_object *gem;
- struct mtk_drm_gem_obj *mtk_gem;
+ struct mtk_gem_obj *mtk_gem;
unsigned int pitch, format;
u64 modifier;
dma_addr_t addr;
@@ -232,7 +232,7 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
swap(plane->state->fb, new_state->fb);
wmb(); /* Make sure the above parameters are set before update */
new_plane_state->pending.async_dirty = true;
- mtk_drm_crtc_async_update(new_state->crtc, plane, state);
+ mtk_crtc_async_update(new_state->crtc, plane, state);
}
static const struct drm_plane_funcs mtk_plane_funcs = {
@@ -241,7 +241,7 @@ static const struct drm_plane_funcs mtk_plane_funcs = {
.destroy = drm_plane_cleanup,
.reset = mtk_plane_reset,
.atomic_duplicate_state = mtk_plane_duplicate_state,
- .atomic_destroy_state = mtk_drm_plane_destroy_state,
+ .atomic_destroy_state = mtk_plane_destroy_state,
.format_mod_supported = mtk_plane_format_mod_supported,
};
@@ -260,8 +260,8 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (WARN_ON(!new_plane_state->crtc))
return 0;
- ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane,
- to_mtk_plane_state(new_plane_state));
+ ret = mtk_crtc_plane_check(new_plane_state->crtc, plane,
+ to_mtk_plane_state(new_plane_state));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 99aff7da0831..231bb7aac947 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -4,8 +4,8 @@
* Author: CK Hu <ck.hu@mediatek.com>
*/
-#ifndef _MTK_DRM_PLANE_H_
-#define _MTK_DRM_PLANE_H_
+#ifndef _MTK_PLANE_H_
+#define _MTK_PLANE_H_
#include <drm/drm_crtc.h>
#include <linux/types.h>
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 615fdd0ce41b..5520b9e3f010 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -13,9 +13,9 @@ config DRM_MESON
config DRM_MESON_DW_HDMI
tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
+ depends on DRM_DW_HDMI
depends on DRM_MESON
default y if DRM_MESON
- select DRM_DW_HDMI
imply DRM_DW_HDMI_I2S_AUDIO
config DRM_MESON_DW_MIPI_DSI
diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
index a6bc1bdb3d0d..a10cff3ca1fe 100644
--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
@@ -95,6 +95,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
return ret;
}
+ clk_disable_unprepare(mipi_dsi->px_clk);
ret = clk_set_rate(mipi_dsi->px_clk, mipi_dsi->mode->clock * 1000);
if (ret) {
@@ -103,6 +104,12 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
return ret;
}
+ ret = clk_prepare_enable(mipi_dsi->px_clk);
+ if (ret) {
+ dev_err(mipi_dsi->dev, "Failed to enable DSI Pixel clock (ret %d)\n", ret);
+ return ret;
+ }
+
switch (mipi_dsi->dsi_device->format) {
case MIPI_DSI_FMT_RGB888:
dpi_data_format = DPI_COLOR_24BIT;
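
Note: gating the pixel clock around clk_set_rate() is the usual fix when changing the rate of a running clock glitches its downstream tree. The reordered sequence reduces to the generic pattern below, assuming a clock that was prepared and enabled beforehand:

    /* Change the rate only while the clock is gated, then re-enable;
     * propagate any failure from the re-enable path.
     */
    clk_disable_unprepare(clk);
    ret = clk_set_rate(clk, rate_hz);
    if (!ret)
    	ret = clk_prepare_enable(clk);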
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 2a82119eb58e..2a942dc6a6dc 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
FREQ_1000_1001(params[i].pixel_freq));
DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
i, params[i].phy_freq,
- FREQ_1000_1001(params[i].phy_freq/10)*10);
+ FREQ_1000_1001(params[i].phy_freq/1000)*1000);
/* Match strict frequency */
if (phy_freq == params[i].phy_freq &&
vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
+ if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
if ((phy_freq == params[freq].phy_freq ||
- phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
+ phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
(vclk_freq == params[freq].vclk_freq ||
vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
if (vclk_freq != params[freq].vclk_freq)
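
Note: the /10 → /1000 change matters because FREQ_1000_1001() rounds: applied at 10 kHz granularity it yields a PHY target no PLL setting reproduces, so the strict comparison never matches. Worked numbers for a 2970000 kHz PHY clock, assuming FREQ_1000_1001(f) = DIV_ROUND_CLOSEST(f * 1000, 1001):

    /* old: FREQ_1000_1001(2970000 / 10)   * 10   = 296703 * 10 = 2967030 kHz
     * new: FREQ_1000_1001(2970000 / 1000) * 1000 = 2967 * 1000 = 2967000 kHz
     * The MHz-granular value is what the PLL code can actually program,
     * so the fractional-rate match now succeeds.
     */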
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 765e49fd8911..58a0e62eaf18 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -366,6 +366,7 @@ struct drm_crtc_state;
struct drm_display_mode;
struct drm_plane;
struct drm_atomic_state;
+struct drm_scanout_buffer;
extern const uint32_t mgag200_primary_plane_formats[];
extern const size_t mgag200_primary_plane_formats_size;
@@ -379,12 +380,16 @@ void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane,
struct drm_atomic_state *state);
void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *old_state);
+int mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
#define MGAG200_PRIMARY_PLANE_HELPER_FUNCS \
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
.atomic_check = mgag200_primary_plane_helper_atomic_check, \
.atomic_update = mgag200_primary_plane_helper_atomic_update, \
.atomic_enable = mgag200_primary_plane_helper_atomic_enable, \
- .atomic_disable = mgag200_primary_plane_helper_atomic_disable
+ .atomic_disable = mgag200_primary_plane_helper_atomic_disable, \
+ .get_scanout_buffer = mgag200_primary_plane_helper_get_scanout_buffer
#define MGAG200_PRIMARY_PLANE_FUNCS \
.update_plane = drm_atomic_helper_update_plane, \
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index e17cb4c5f774..fc54851d3384 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -21,6 +21,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include "mgag200_drv.h"
@@ -546,6 +547,23 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
msleep(20);
}
+int mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct mga_device *mdev = to_mga_device(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(mdev->vram);
+
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
+ }
+ return -ENODEV;
+}
+
/*
* CRTC
*/
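
Note: get_scanout_buffer is the hook the new drm_panic infrastructure calls at panic time. It must describe the currently scanned-out framebuffer without allocating or locking, and the panic handler then draws its message through the returned iosys_map. A hedged sketch of writing into such a buffer, with the coordinates and pixel value purely illustrative:

    /* Write one 32bpp pixel into a scanout buffer filled in as above.
     * iosys_map_wr() handles both system- and I/O-memory mappings,
     * which is why the hook hands back an iosys_map, not a raw pointer.
     */
    size_t off = y * sb->pitch[0] + x * sb->format->cpp[0];

    iosys_map_wr(&sb->map[0], off, u32, 0xffffffff);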
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 4c9bf237d4a2..cd03e1ad4e3c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,9 +2,12 @@
config DRM_MSM
tristate "MSM DRM"
- depends on DRM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on COMMON_CLK
+ depends on DRM
+ depends on DRM_DISPLAY_DP_AUX_BUS
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on IOMMU_SUPPORT
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
depends on QCOM_OCMEM || QCOM_OCMEM=n
@@ -14,9 +17,6 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
- select DRM_DP_AUX_BUS
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_EXEC
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index f4b4cd084282..2c720f1fc1b2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/debugfs.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 175ee4ab8a6f..a5c6498a43f0 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -219,7 +219,7 @@ static void put_pages(struct drm_gem_object *obj)
}
}
-static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
+static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -257,24 +257,24 @@ static void pin_obj_locked(struct drm_gem_object *obj)
mutex_unlock(&priv->lru.lock);
}
-struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
+struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct page **p;
- msm_gem_lock(obj);
- p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
+ msm_gem_assert_locked(obj);
+
+ p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (!IS_ERR(p))
pin_obj_locked(obj);
- msm_gem_unlock(obj);
return p;
}
-void msm_gem_unpin_pages(struct drm_gem_object *obj)
+void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
{
- msm_gem_lock(obj);
+ msm_gem_assert_locked(obj);
+
msm_gem_unpin_locked(obj);
- msm_gem_unlock(obj);
}
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
@@ -489,7 +489,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
msm_gem_assert_locked(obj);
- pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
+ pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
@@ -703,7 +703,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
if (obj->import_attach)
return ERR_PTR(-ENODEV);
- pages = msm_gem_pin_pages_locked(obj, madv);
+ pages = msm_gem_get_pages_locked(obj, madv);
if (IS_ERR(pages))
return ERR_CAST(pages);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8d414b072c29..85f0257e83da 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -140,8 +140,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
-struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
-void msm_gem_unpin_pages(struct drm_gem_object *obj);
+struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
+void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 0915f3b68752..ee267490c935 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -47,13 +47,23 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
- if (!obj->import_attach)
- msm_gem_pin_pages(obj);
- return 0;
+ struct page **pages;
+ int ret = 0;
+
+ if (obj->import_attach)
+ return 0;
+
+ pages = msm_gem_pin_pages_locked(obj);
+ if (IS_ERR(pages))
+ ret = PTR_ERR(pages);
+
+ return ret;
}
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (!obj->import_attach)
- msm_gem_unpin_pages(obj);
+ if (obj->import_attach)
+ return;
+
+ msm_gem_unpin_pages_locked(obj);
}
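
Note: the _locked suffix records a locking contract rather than new behavior. The prime pin/unpin hooks are reached with the GEM object's dma-resv already held by the dma-buf attach path, so re-taking the lock inside would deadlock; the renamed helpers assert the lock instead of taking it. A minimal sketch of the convention for a caller that arrives unlocked, using the msm_gem_lock/unlock wrappers seen in the removed lines:

    /* Unlocked caller: take the resv lock, then use the _locked helper,
     * which only asserts that the lock is held.
     */
    msm_gem_lock(obj);
    p = msm_gem_pin_pages_locked(obj);
    msm_gem_unlock(obj);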
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index ea10bf81582e..0f895b8a99d6 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -343,6 +343,9 @@ static int __maybe_unused lcdif_suspend(struct device *dev)
if (ret)
return ret;
+ if (pm_runtime_suspended(dev))
+ return 0;
+
return lcdif_rpm_suspend(dev);
}
@@ -350,7 +353,8 @@ static int __maybe_unused lcdif_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- lcdif_rpm_resume(dev);
+ if (!pm_runtime_suspended(dev))
+ lcdif_rpm_resume(dev);
return drm_mode_config_helper_resume(drm);
}
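
Note: the system-sleep hooks now check pm_runtime_suspended() so a device already runtime-suspended is not suspended twice; unbalanced clock disables would otherwise underflow the enable counts on resume. The guard in isolation:

    /* Skip the hardware suspend path when runtime PM already did it,
     * and mirror the check on resume to keep enable counts balanced.
     */
    if (pm_runtime_suspended(dev))
    	return 0;
    return lcdif_rpm_suspend(dev);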
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ceef470c9fbf..4c10b400658c 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
- depends on DRM && PCI && MMU
+ depends on DRM
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HDMI_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on PCI
+ depends on MMU
select IOMMU_API
select FW_LOADER
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
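
Note: several Kconfig hunks in this section make the same swap. 'select' force-enables a symbol while ignoring that symbol's own dependencies, which can produce helper code whose prerequisites are missing; 'depends on' instead makes the driver unavailable until the helper library is enabled the ordinary way. Schematically, reusing the symbol names from the hunk above:

    config DRM_NOUVEAU
    	tristate "Nouveau (NVIDIA) cards"
    	# 'depends on' respects DRM_DISPLAY_HELPER's own dependencies;
    	# the old 'select DRM_DISPLAY_HELPER' bypassed them.
    	depends on DRM_DISPLAY_HELPER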
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index 9c942fbd836d..5936b6b3b15d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: MIT
+#include <linux/debugfs.h>
#include <linux/string.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 80f74ee0fc78..f465fe93b1f7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -312,11 +312,21 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
if (init->fb_ctxdma_handle == ~0) {
switch (init->tt_ctxdma_handle) {
- case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR ; break;
- case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
- case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
- case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
- case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE ; break;
+ case NOUVEAU_FIFO_ENGINE_GR:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
+ break;
+ case NOUVEAU_FIFO_ENGINE_VP:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC;
+ break;
+ case NOUVEAU_FIFO_ENGINE_PPP:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP;
+ break;
+ case NOUVEAU_FIFO_ENGINE_BSP:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD;
+ break;
+ case NOUVEAU_FIFO_ENGINE_CE:
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE;
+ break;
default:
return nouveau_abi16_put(abi16, -ENOSYS);
}
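/*
 * The NOUVEAU_FIFO_ENGINE_* names above replace the bare tt_ctxdma_handle
 * values of the old switch. A sketch of the presumed definitions, with the
 * numeric values taken from the removed case labels (where these constants
 * actually live, uapi header or otherwise, is an assumption):
 */
#define NOUVEAU_FIFO_ENGINE_GR  0x01	/* graphics */
#define NOUVEAU_FIFO_ENGINE_VP  0x02	/* video decode (MSPDEC) */
#define NOUVEAU_FIFO_ENGINE_PPP 0x04	/* post-processing (MSPPP) */
#define NOUVEAU_FIFO_ENGINE_BSP 0x08	/* bitstream (MSVLD) */
#define NOUVEAU_FIFO_ENGINE_CE  0x30	/* copy engines */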
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 11c8c4a80079..661b901d8ecc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -50,18 +50,6 @@ struct drm_nouveau_grobj_alloc {
int class;
};
-struct drm_nouveau_notifierobj_alloc {
- uint32_t channel;
- uint32_t handle;
- uint32_t size;
- uint32_t offset;
-};
-
-struct drm_nouveau_gpuobj_free {
- int channel;
- uint32_t handle;
-};
-
struct drm_nouveau_setparam {
uint64_t param;
uint64_t value;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 479effcf607e..79cfab53f80e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
+#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
- fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
- bios->legacy.i2c_indices.crt, 1, 1);
+ bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
- all_heads, 0);
+ all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
- all_heads, 1);
+ all_heads, DCB_OUTPUT_B);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db8cbf615112..1e2d28fd10dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -467,17 +467,14 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
set_placement_range(nvbo, domain);
}
-int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
+int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
bool force = false, evict = false;
- int ret;
+ int ret = 0;
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (ret)
- return ret;
+ dma_resv_assert_held(bo->base.resv);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
@@ -540,20 +537,15 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
out:
if (force && ret)
nvbo->contig = false;
- ttm_bo_unreserve(bo);
return ret;
}
-int
-nouveau_bo_unpin(struct nouveau_bo *nvbo)
+void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret;
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (ret)
- return ret;
+ dma_resv_assert_held(bo->base.resv);
ttm_bo_unpin(&nvbo->bo);
if (!nvbo->bo.pin_count) {
@@ -568,8 +560,33 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
break;
}
}
+}
+
+int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
+{
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret;
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret)
+ return ret;
+ ret = nouveau_bo_pin_locked(nvbo, domain, contig);
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
+int nouveau_bo_unpin(struct nouveau_bo *nvbo)
+{
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret)
+ return ret;
+ nouveau_bo_unpin_locked(nvbo);
ttm_bo_unreserve(bo);
+
return 0;
}
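/*
 * The refactor above follows the usual TTM locking split: the *_locked
 * variants assert (rather than take) the buffer's dma_resv lock, so they
 * can be called from paths where the caller already holds it, while the
 * original names become thin reserve/unreserve wrappers. Minimal sketch
 * of a caller of each flavour (both function bodies are illustrative):
 */
static int pin_with_resv_held(struct nouveau_bo *nvbo)
{
	/* e.g. a dma-buf pin callback: the core holds the resv here */
	return nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
}

static int pin_standalone(struct nouveau_bo *nvbo)
{
	/* the wrapper reserves, pins, and unreserves on our behalf */
	return nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
}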
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index e9dfab6a8156..4e891752c255 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -85,6 +85,8 @@ int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct dma_resv *robj,
struct nouveau_bo **);
+int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig);
+void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo);
int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
int nouveau_bo_unpin(struct nouveau_bo *);
int nouveau_bo_map(struct nouveau_bo *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index f28f9a857458..aed5d5b51b43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -83,7 +83,7 @@ static bool
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
- struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct nvif_head *head = &nouveau_crtc(crtc)->head;
struct nvif_head_scanoutpos_v0 args;
int retry = 20;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 12feecf71e75..6fb65b01d778 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
dma_addr_t *dma_addrs;
struct nouveau_fence *fence;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
- dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
npages);
@@ -406,11 +406,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
- kfree(src_pfns);
- kfree(dst_pfns);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
for (i = 0; i < npages; i++)
dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
- kfree(dma_addrs);
+ kvfree(dma_addrs);
}
void
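/*
 * Rationale for the switch above: npages can be large enough that three
 * physically contiguous kcalloc() arrays may fail, and chunk eviction is
 * not allowed to fail. kvcalloc() falls back to vmalloc for large sizes,
 * __GFP_NOFAIL guarantees forward progress, and kvfree() releases either
 * backing kind, which is why the free side changes in lockstep. The idiom:
 */
pfns = kvcalloc(npages, sizeof(*pfns), GFP_KERNEL | __GFP_NOFAIL);
/* ... fill and consume pfns ... */
kvfree(pfns);	/* correct for both kmalloc- and vmalloc-backed memory */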
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 7de7707ec6a8..bcda0105160f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -181,7 +181,7 @@ nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
if (nouveau_mst) {
mstm = outp->dp.mstm;
if (mstm)
- mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd);
+ mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd) == DRM_DP_MST;
}
if (nouveau_dp_has_sink_count(connector, outp)) {
@@ -225,12 +225,18 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
u8 *dpcd = nv_encoder->dp.dpcd;
int ret = NOUVEAU_DP_NONE, hpd;
- /* If we've already read the DPCD on an eDP device, we don't need to
- * reread it as it won't change
+ /* eDP ports don't support hotplugging, so there's no point in probing an eDP port
+ * again once we've already probed it.
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
- dpcd[DP_DPCD_REV] != 0)
- return NOUVEAU_DP_SST;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (connector->status == connector_status_connected)
+ return NOUVEAU_DP_SST;
+ else if (connector->status == connector_status_disconnected)
+ return NOUVEAU_DP_NONE;
+ }
+
+ // Ensure that the aux bus is enabled for probing
+ drm_dp_dpcd_set_powered(&nv_connector->aux, true);
mutex_lock(&nv_encoder->dp.hpd_irq_lock);
if (mstm) {
@@ -293,6 +299,13 @@ out:
if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
nv50_mstm_remove(mstm);
+ /* GSP doesn't like it when we try to do aux transactions on a port it considers
+ * disconnected, and since we don't really have a use case for that anyway, just
+ * disable the aux bus here if we've decided the connector is disconnected.
+ */
+ if (ret == NOUVEAU_DP_NONE)
+ drm_dp_dpcd_set_powered(&nv_connector->aux, false);
+
mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
return ret;
}
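/*
 * Context for the "== DRM_DP_MST" change above: drm_dp_read_mst_cap() now
 * returns an enum rather than a bool, so a truthy check would also accept
 * sinks that only support sideband messaging over a single stream. A
 * sketch of the presumed return type (only DRM_DP_MST is confirmed by the
 * hunk; the other enumerators and their order are an assumption):
 */
enum drm_dp_mst_mode {
	DRM_DP_SST,			/* no MST or sideband support */
	DRM_DP_MST,			/* full multi-stream support */
	DRM_DP_SST_SIDEBAND_MSG,	/* single stream, sideband-capable */
};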
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 1b2ff0c40fc1..b58ab595faf8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -89,18 +89,18 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
int ret;
/* pin buffer into GTT */
- ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
+ ret = nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
if (ret)
- return -EINVAL;
+ ret = -EINVAL;
- return 0;
+ return ret;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
- nouveau_bo_unpin(nvbo);
+ nouveau_bo_unpin_locked(nvbo);
}
struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 0a0a11dc9ec0..ee02cd833c5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -812,15 +812,15 @@ op_remap(struct drm_gpuva_op_remap *r,
struct drm_gpuva_op_unmap *u = r->unmap;
struct nouveau_uvma *uvma = uvma_from_va(u->va);
u64 addr = uvma->va.va.addr;
- u64 range = uvma->va.va.range;
+ u64 end = uvma->va.va.addr + uvma->va.va.range;
if (r->prev)
addr = r->prev->va.addr + r->prev->va.range;
if (r->next)
- range = r->next->va.addr - addr;
+ end = r->next->va.addr;
- op_unmap_range(u, addr, range);
+ op_unmap_range(u, addr, end - addr);
}
static int
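/*
 * Worked example for the op_remap() fix above, assuming a VA starting at
 * 0x1000 with range 0x4000 (so it ends at 0x5000) and a prev remap
 * covering [0x1000, 0x2000) with no next:
 *
 *   old: addr = 0x2000, range stays 0x4000 -> unmap [0x2000, 0x6000),
 *        0x1000 past the real end of the mapping;
 *   new: addr = 0x2000, end stays 0x5000   -> unmap [0x2000, 0x5000).
 *
 * Tracking the fixed endpoint instead of a length keeps the prev and next
 * adjustments independent of each other.
 */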
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
index 6a0a4d3b8902..027867c2a8c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -1080,7 +1080,7 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
if (ret) {
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return PTR_ERR(ctrl);
+ return ret;
}
memcpy(data, ctrl->data, size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 986e8d547c94..060c74a80eb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -420,7 +420,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
return ret;
} else {
ret = nvkm_memory_map(gr->attrib_cb, 0, chan->vmm, chan->attrib_cb,
- &args, sizeof(args));;
+ &args, sizeof(args));
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4bf486b57101..cb05f7f48a98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
+static void of_fini(void *p)
+{
+ kfree(p);
+}
+
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
- .fini = (void(*)(void *))kfree,
+ .fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 7bcbc4895ec2..271bfa038f5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
+#include <subdev/gsp.h>
void
gm107_devinit_disable(struct nvkm_devinit *init)
@@ -33,10 +34,13 @@ gm107_devinit_disable(struct nvkm_devinit *init)
u32 r021c00 = nvkm_rd32(device, 0x021c00);
u32 r021c04 = nvkm_rd32(device, 0x021c04);
- if (r021c00 & 0x00000001)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
- if (r021c00 & 0x00000004)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ /* GSP only wants to enable/disable display */
+ if (!nvkm_gsp_rm(device->gsp)) {
+ if (r021c00 & 0x00000001)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
+ if (r021c00 & 0x00000004)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ }
if (r021c04 & 0x00000001)
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
index 11b4c9c274a1..666eb93b1742 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
@@ -41,6 +41,7 @@ r535_devinit_new(const struct nvkm_devinit_func *hw,
rm->dtor = r535_devinit_dtor;
rm->post = hw->post;
+ rm->disable = hw->disable;
ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 9994cbd6f1c4..9858c1438aa7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -1112,7 +1112,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
- strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
+ strings = (char *)rpc + str_offset;
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
int name_len = strlen(r535_registry_entries[i].name) + 1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index a7f3fc342d87..dd5b5a17ece0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
- if (refcount_inc_not_zero(&iobj->maps))
+ if (refcount_inc_not_zero(&iobj->maps)) {
+ /* read barrier matches the wmb on refcount set */
+ smp_rmb();
return iobj->map;
+ }
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
+ /* barrier to ensure the ptrs are written before refcount is set */
+ smp_wmb();
refcount_set(&iobj->maps, 1);
}
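/*
 * The new smp_wmb()/smp_rmb() pair above is the standard publication
 * idiom: the writer must make the map and ptrs visible before the
 * refcount that lock-free readers use as the "already mapped" flag, and a
 * reader that wins refcount_inc_not_zero() must order that check before
 * dereferencing. A minimal sketch of the pattern (struct my_obj and the
 * helper names are illustrative, not nouveau code):
 */
static void publish_map(struct my_obj *obj, void __iomem *map)
{
	obj->map = map;			/* set everything up first... */
	smp_wmb();			/* ...publish it... */
	refcount_set(&obj->maps, 1);	/* ...then raise the flag */
}

static void __iomem *try_fast_acquire(struct my_obj *obj)
{
	if (!refcount_inc_not_zero(&obj->maps))
		return NULL;		/* caller falls back to the lock */
	smp_rmb();			/* pairs with smp_wmb() above */
	return obj->map;
}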
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index b715301ec79f..6c49270cb290 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -4,7 +4,7 @@ config DRM_OMAP
depends on DRM && OF
depends on ARCH_OMAP2PLUS
select DRM_KMS_HELPER
- select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
+ select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select VIDEOMODE_HELPERS
select HDMI
default n
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9753c1e1f994..1aca3060333e 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -1212,7 +1212,6 @@ struct platform_driver omap_dmm_driver = {
.probe = omap_dmm_probe,
.remove_new = omap_dmm_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
.of_match_table = of_match_ptr(dmm_of_match),
.pm = &omap_dmm_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 1d414b33fee3..449d521c78fe 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -5,6 +5,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/seq_file.h>
#include <drm/drm_blend.h>
#include <drm/drm_modeset_helper.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 6b08b137af1a..523be34682ca 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -51,6 +51,10 @@ static void pan_worker(struct work_struct *work)
omap_gem_roll(bo, fbi->var.yoffset * npages);
}
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area)
+
static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
@@ -78,11 +82,9 @@ fallback:
static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
- struct drm_fb_helper *helper = info->par;
- struct drm_framebuffer *fb = helper->fb;
- struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma);
+ return fb_deferred_io_mmap(info, vma);
}
static void omap_fbdev_fb_destroy(struct fb_info *info)
@@ -94,6 +96,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
DBG();
+ fb_deferred_io_cleanup(info);
drm_fb_helper_fini(helper);
omap_gem_unpin(bo);
@@ -104,15 +107,19 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
kfree(fbdev);
}
+/*
+ * For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap()
+ * because we use write-combine.
+ */
static const struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
- __FB_DEFAULT_DMAMEM_OPS_RDWR,
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(omap_fbdev),
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = omap_fbdev_pan_display,
- __FB_DEFAULT_DMAMEM_OPS_DRAW,
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(omap_fbdev),
.fb_ioctl = drm_fb_helper_ioctl,
.fb_mmap = omap_fbdev_fb_mmap,
.fb_destroy = omap_fbdev_fb_destroy,
@@ -213,6 +220,15 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbi->fix.smem_start = dma_addr;
fbi->fix.smem_len = bo->size;
+ /* deferred I/O */
+ helper->fbdefio.delay = HZ / 20;
+ helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ fbi->fbdefio = &helper->fbdefio;
+ ret = fb_deferred_io_init(fbi);
+ if (ret)
+ goto fail;
+
/* if we have DMM, then we can use it for scrolling by just
* shuffling pages around in DMM rather than doing sw blit.
*/
@@ -238,8 +254,20 @@ fail:
return ret;
}
+static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.fb_probe = omap_fbdev_create,
+ .fb_dirty = omap_fbdev_dirty,
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi)
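/*
 * How the deferred-I/O pieces above fit together: CPU writes to the fbdev
 * mapping fault through fb_deferred_io_mmap(), dirty pages are collected
 * for fbdefio.delay (HZ / 20, i.e. 50 ms), and the flush then walks
 *
 *   fb_deferred_io worker
 *     -> helper->fbdefio.deferred_io = drm_fb_helper_deferred_io()
 *       -> .fb_dirty = omap_fbdev_dirty()
 *         -> helper->fb->funcs->dirty()
 *
 * which is why the mmap path now returns fb_deferred_io_mmap() (with a
 * write-combine pgprot) instead of mapping the GEM object directly.
 */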
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d037b3b8b999..e54f6f5604ed 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -335,6 +335,17 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_LG_SW43408
+ tristate "LG SW43408 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the LG SW43408 panel.
+ The panel has a 1080x2160@60Hz resolution and uses 24-bit RGB per
+ pixel. It provides a MIPI DSI interface to the host and has a
+ built-in LED backlight.
+
config DRM_PANEL_MAGNACHIP_D53E6EA8966
tristate "Magnachip D53E6EA8966 DSI panel"
depends on OF && SPI
@@ -533,15 +544,27 @@ config DRM_PANEL_RAYDIUM_RM68200
config DRM_PANEL_RAYDIUM_RM692E5
tristate "Raydium RM692E5-based DSI panel"
- depends on OF
- depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on DRM_MIPI_DSI
+ depends on OF
help
Say Y here if you want to enable support for Raydium RM692E5-based
display panels, such as the one found in the Fairphone 5 smartphone.
+config DRM_PANEL_RAYDIUM_RM69380
+ tristate "Raydium RM69380-based DSI panel"
+ depends on OF && GPIOLIB
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Raydium RM69380-based
+ display panels.
+
+ This panel controller can be found in the Lenovo Xiaoxin Pad Pro 2021
+ in combination with an EDO OLED panel.
+
config DRM_PANEL_RONBO_RB070D30
tristate "Ronbo Electronics RB070D30 panel"
depends on OF
@@ -559,12 +582,12 @@ config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
config DRM_PANEL_SAMSUNG_ATNA33XC20
tristate "Samsung ATNA33XC20 eDP panel"
- depends on OF
depends on BACKLIGHT_CLASS_DEVICE
+ depends on DRM_DISPLAY_DP_AUX_BUS
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on OF
depends on PM
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
help
DRM panel driver for the Samsung ATNA33XC20 panel. This panel can't
be handled by the DRM_PANEL_SIMPLE driver because its power
@@ -586,6 +609,15 @@ config DRM_PANEL_SAMSUNG_LD9040
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E3FA7
+ tristate "Samsung S6E3FA7 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Samsung S6E3FA7
+ 1920x2220 panel.
+
config DRM_PANEL_SAMSUNG_S6D16D0
tristate "Samsung S6D16D0 DSI video mode panel"
depends on OF
@@ -790,13 +822,13 @@ config DRM_PANEL_STARTEK_KD070FHFID015
config DRM_PANEL_EDP
tristate "support for simple Embedded DisplayPort panels"
- depends on OF
depends on BACKLIGHT_CLASS_DEVICE
+ depends on DRM_DISPLAY_DP_AUX_BUS
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on OF
depends on PM
select VIDEOMODE_HELPERS
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
help
DRM panel driver for dumb eDP panels that need at most a regulator and
@@ -870,11 +902,11 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
config DRM_PANEL_VISIONOX_R66451
tristate "Visionox R66451"
- depends on OF
- depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on DRM_MIPI_DSI
+ depends on OF
help
Say Y here if you want to enable support for Visionox
R66451 1080x2340 AMOLED DSI panel.
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index f156d7fa0bcc..f0203f6e02f4 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_LG_SW43408) += panel-lg-sw43408.o
obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3051D) += panel-newvision-nv3051d.o
@@ -55,6 +56,7 @@ obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67191) += panel-raydium-rm67191.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o
+obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o
obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o
@@ -62,6 +64,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0) += panel-samsung-s6d7aa0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FA7) += panel-samsung-s6e3fa7.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index d58f90bc48fb..6db277efcbb7 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -210,15 +210,12 @@ struct panel_desc {
* struct edp_panel_entry - Maps panel ID to delay / panel name.
*/
struct edp_panel_entry {
- /** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */
- u32 panel_id;
+ /** @ident: EDID identity used for panel matching. */
+ const struct drm_edid_ident ident;
/** @delay: The power sequencing delays needed for this panel. */
const struct panel_delay *delay;
- /** @name: Name of this panel (for printing to logs). */
- const char *name;
-
/** @override_edid_mode: Override the mode obtained by edid. */
const struct drm_display_mode *override_edid_mode;
};
@@ -245,7 +242,7 @@ struct panel_edp {
const struct edp_panel_entry *detected_panel;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
struct drm_display_mode override_mode;
@@ -620,13 +617,16 @@ static int panel_edp_get_modes(struct drm_panel *panel,
if (p->ddc) {
pm_runtime_get_sync(panel->dev);
- if (!p->edid)
- p->edid = drm_get_edid(connector, p->ddc);
+ if (!p->drm_edid)
+ p->drm_edid = drm_edid_read_ddc(connector, p->ddc);
+
+ drm_edid_connector_update(connector, p->drm_edid);
+
/*
* If both edid and hard-coded modes exists, skip edid modes to
* avoid multiple preferred modes.
*/
- if (p->edid && !has_hard_coded_modes) {
+ if (p->drm_edid && !has_hard_coded_modes) {
if (has_override_edid_mode) {
/*
* override_edid_mode is specified. Use
@@ -635,7 +635,7 @@ static int panel_edp_get_modes(struct drm_panel *panel,
num += panel_edp_override_edid_mode(p, connector,
p->detected_panel->override_edid_mode);
} else {
- num += drm_add_edid_modes(connector, p->edid);
+ num += drm_edid_connector_add_modes(connector);
}
}
@@ -691,7 +691,7 @@ static int detected_panel_show(struct seq_file *s, void *data)
else if (!p->detected_panel)
seq_puts(s, "HARDCODED\n");
else
- seq_printf(s, "%s\n", p->detected_panel->name);
+ seq_printf(s, "%s\n", p->detected_panel->ident.name);
return 0;
}
@@ -761,11 +761,31 @@ static void panel_edp_parse_panel_timing_node(struct device *dev,
dev_err(dev, "Reject override mode: No display_timing found\n");
}
-static const struct edp_panel_entry *find_edp_panel(u32 panel_id);
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id, const struct drm_edid *edid);
+
+static void panel_edp_set_conservative_timings(struct panel_edp *panel, struct panel_desc *desc)
+{
+ /*
+ * It's highly likely that the panel will work if we use very
+ * conservative timings, so let's do that.
+ *
+ * Nearly all panels have an "unprepare" delay of 500 ms, though
+ * there are a few with 1000. Let's stick 2000 in just to be
+ * super conservative.
+ *
+ * An "enable" delay of 80 ms seems the most common, but we'll
+ * throw in 200 ms to be safe.
+ */
+ desc->delay.unprepare = 2000;
+ desc->delay.enable = 200;
+
+ panel->detected_panel = ERR_PTR(-EINVAL);
+}
static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
{
struct panel_desc *desc;
+ const struct drm_edid *base_block;
u32 panel_id;
char vend[4];
u16 product_id;
@@ -791,19 +811,26 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
/* Power the panel on so we can read the EDID */
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "Couldn't power on panel to read EDID: %d\n", ret);
+ dev_err(dev,
+ "Couldn't power on panel to ID it; using conservative timings: %d\n",
+ ret);
+ panel_edp_set_conservative_timings(panel, desc);
goto exit;
}
- panel_id = drm_edid_get_panel_id(panel->ddc);
- if (!panel_id) {
- dev_err(dev, "Couldn't identify panel via EDID\n");
- ret = -EIO;
+ base_block = drm_edid_read_base_block(panel->ddc);
+ if (base_block) {
+ panel_id = drm_edid_get_panel_id(base_block);
+ } else {
+ dev_err(dev, "Couldn't read EDID for ID; using conservative timings\n");
+ panel_edp_set_conservative_timings(panel, desc);
goto exit;
}
drm_edid_decode_panel_id(panel_id, vend, &product_id);
- panel->detected_panel = find_edp_panel(panel_id);
+ panel->detected_panel = find_edp_panel(panel_id, base_block);
+
+ drm_edid_free(base_block);
/*
* We're using non-optimized timings and want it really obvious that
@@ -814,40 +841,20 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
dev_warn(dev,
"Unknown panel %s %#06x, using conservative timings\n",
vend, product_id);
-
- /*
- * It's highly likely that the panel will work if we use very
- * conservative timings, so let's do that. We already know that
- * the HPD-related delays must have worked since we got this
- * far, so we really just need the "unprepare" / "enable"
- * delays. We don't need "prepare_to_enable" since that
- * overlaps the "enable" delay anyway.
- *
- * Nearly all panels have a "unprepare" delay of 500 ms though
- * there are a few with 1000. Let's stick 2000 in just to be
- * super conservative.
- *
- * An "enable" delay of 80 ms seems the most common, but we'll
- * throw in 200 ms to be safe.
- */
- desc->delay.unprepare = 2000;
- desc->delay.enable = 200;
-
- panel->detected_panel = ERR_PTR(-EINVAL);
+ panel_edp_set_conservative_timings(panel, desc);
} else {
dev_info(dev, "Detected %s %s (%#06x)\n",
- vend, panel->detected_panel->name, product_id);
+ vend, panel->detected_panel->ident.name, product_id);
/* Update the delay; everything else comes from EDID */
desc->delay = *panel->detected_panel->delay;
}
- ret = 0;
exit:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
- return ret;
+ return 0;
}
static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
@@ -940,8 +947,14 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+
+ /*
+ * Warn if we get an error, but don't consider it fatal. Having
+ * a panel where we can't control the backlight is better than
+ * no panel.
+ */
if (err)
- goto err_finished_pm_runtime;
+ dev_warn(dev, "failed to register dp aux backlight: %d\n", err);
}
drm_panel_add(&panel->base);
@@ -971,8 +984,8 @@ static void panel_edp_remove(struct device *dev)
if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
put_device(&panel->ddc->dev);
- kfree(panel->edid);
- panel->edid = NULL;
+ drm_edid_free(panel->drm_edid);
+ panel->drm_edid = NULL;
}
static void panel_edp_shutdown(struct device *dev)
@@ -1005,6 +1018,19 @@ static const struct panel_desc auo_b101ean01 = {
},
};
+static const struct drm_display_mode auo_b116xa3_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
static const struct drm_display_mode auo_b116xak01_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -1865,6 +1891,13 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_200_500_e50_p2e200 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .prepare_to_enable = 200,
+};
+
static const struct panel_delay delay_200_500_e80 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -1919,17 +1952,21 @@ static const struct panel_delay delay_200_500_e50_po2e200 = {
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
- .name = _name, \
- .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
- product_id), \
+ .ident = { \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ }, \
.delay = _delay \
}
#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \
{ \
- .name = _name, \
- .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
- product_id), \
+ .ident = { \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ }, \
.delay = _delay, \
.override_edid_mode = _mode \
}
@@ -1953,7 +1990,9 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
- EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAN04.0"),
+ EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
+ &auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
@@ -1961,6 +2000,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0607, &delay_200_500_e200, "Unknown"),
@@ -2010,6 +2050,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -2025,6 +2066,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1156, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
@@ -2034,7 +2076,9 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50, "MNC207QS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
+
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
@@ -2076,15 +2120,25 @@ static const struct edp_panel_entry edp_panels[] = {
{ /* sentinel */ }
};
-static const struct edp_panel_entry *find_edp_panel(u32 panel_id)
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id, const struct drm_edid *edid)
{
const struct edp_panel_entry *panel;
if (!panel_id)
return NULL;
- for (panel = edp_panels; panel->panel_id; panel++)
- if (panel->panel_id == panel_id)
+ /*
+ * Match with identity first. This allows handling the case where
+ * vendors incorrectly reused the same panel ID for multiple panels that
+ * need different settings. If there's no match, try again with panel
+ * ID, which should be unique.
+ */
+ for (panel = edp_panels; panel->ident.panel_id; panel++)
+ if (drm_edid_match(edid, &panel->ident))
+ return panel;
+
+ for (panel = edp_panels; panel->ident.panel_id; panel++)
+ if (panel->ident.panel_id == panel_id)
return panel;
return NULL;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 2ffe5f68a890..084c37fa7348 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -455,6 +455,202 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */
};
+static const struct ili9881c_instr kd050hdfia020_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x72),
+ ILI9881C_COMMAND_INSTR(0x04, 0x00),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x09),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x10, 0x00),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x05),
+ ILI9881C_COMMAND_INSTR(0x20, 0x05),
+ ILI9881C_COMMAND_INSTR(0x21, 0x02),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x02),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x60, 0x00),
+ ILI9881C_COMMAND_INSTR(0x61, 0x15),
+ ILI9881C_COMMAND_INSTR(0x62, 0x14),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x66, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x67, 0x06),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x07),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x01),
+ ILI9881C_COMMAND_INSTR(0x76, 0x00),
+ ILI9881C_COMMAND_INSTR(0x77, 0x14),
+ ILI9881C_COMMAND_INSTR(0x78, 0x15),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x07),
+ ILI9881C_COMMAND_INSTR(0x80, 0x02),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x83, 0x02),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(0x4),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3A, 0x94),
+ ILI9881C_COMMAND_INSTR(0x8D, 0x15),
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_SWITCH_PAGE_INSTR(0x1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x90),
+ ILI9881C_COMMAND_INSTR(0x55, 0xA2),
+ ILI9881C_COMMAND_INSTR(0x50, 0xB7),
+ ILI9881C_COMMAND_INSTR(0x51, 0xB7),
+ ILI9881C_COMMAND_INSTR(0x60, 0x22),
+ ILI9881C_COMMAND_INSTR(0x61, 0x00),
+ ILI9881C_COMMAND_INSTR(0x62, 0x19),
+ ILI9881C_COMMAND_INSTR(0x63, 0x10),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x27),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x17),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x1E),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1F),
+ ILI9881C_COMMAND_INSTR(0xA8, 0x8B),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x27),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x78),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x4C),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x21),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x54),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x67),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xC0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x27),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x17),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1E),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1F),
+ ILI9881C_COMMAND_INSTR(0xC8, 0x8B),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x27),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x78),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x4C),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x21),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x54),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x67),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+ ILI9881C_SWITCH_PAGE_INSTR(0),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3A, 0x07),
+};
+
static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
@@ -1080,10 +1276,10 @@ static int ili9881c_prepare(struct drm_panel *panel)
msleep(5);
/* And reset it */
- gpiod_set_value(ctx->reset, 1);
+ gpiod_set_value_cansleep(ctx->reset, 1);
msleep(20);
- gpiod_set_value(ctx->reset, 0);
+ gpiod_set_value_cansleep(ctx->reset, 0);
msleep(20);
for (i = 0; i < ctx->desc->init_length; i++) {
@@ -1138,7 +1334,7 @@ static int ili9881c_unprepare(struct drm_panel *panel)
mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
regulator_disable(ctx->power);
- gpiod_set_value(ctx->reset, 1);
+ gpiod_set_value_cansleep(ctx->reset, 1);
return 0;
}
@@ -1177,6 +1373,23 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.height_mm = 217,
};
+static const struct drm_display_mode kd050hdfia020_default_mode = {
+ .clock = 62000,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 10,
+ .hsync_end = 720 + 10 + 20,
+ .htotal = 720 + 10 + 20 + 30,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 10,
+ .vsync_end = 1280 + 10 + 10,
+ .vtotal = 1280 + 10 + 10 + 20,
+
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
static const struct drm_display_mode tl050hdv35_default_mode = {
.clock = 59400,
@@ -1345,6 +1558,14 @@ static const struct ili9881c_desc k101_im2byl02_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
};
+static const struct ili9881c_desc kd050hdfia020_desc = {
+ .init = kd050hdfia020_init,
+ .init_length = ARRAY_SIZE(kd050hdfia020_init),
+ .mode = &kd050hdfia020_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+};
+
static const struct ili9881c_desc tl050hdv35_desc = {
.init = tl050hdv35_init,
.init_length = ARRAY_SIZE(tl050hdv35_init),
@@ -1372,6 +1593,7 @@ static const struct ili9881c_desc am8001280g_desc = {
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
+ { .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 3e0a8e0d58a0..483dc88d16d8 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -247,6 +247,7 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
index b942a0162274..c54be0cc3f08 100644
--- a/drivers/gpu/drm/panel/panel-khadas-ts050.c
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -25,6 +25,7 @@ struct khadas_ts050_panel {
struct regulator *supply;
struct gpio_desc *reset_gpio;
struct gpio_desc *enable_gpio;
+ struct khadas_ts050_panel_data *panel_data;
bool prepared;
bool enabled;
@@ -32,544 +33,601 @@ struct khadas_ts050_panel {
struct khadas_ts050_panel_cmd {
u8 cmd;
- u8 data;
+ u8 data[55];
+ u8 size;
+};
+
+struct khadas_ts050_panel_data {
+ struct khadas_ts050_panel_cmd *init_code;
+ int len;
+};
+
+static const struct khadas_ts050_panel_cmd ts050v2_init_code[] = {
+ {0xB9, {0xFF, 0x83, 0x99}, 0x03},
+ {0xBA, {0x63, 0x23, 0x68, 0xCF}, 0x04},
+ {0xD2, {0x55}, 0x01},
+ {0xB1, {0x02, 0x04, 0x70, 0x90, 0x01, 0x32, 0x33,
+ 0x11, 0x11, 0x4D, 0x57, 0x56, 0x73, 0x02, 0x02}, 0x0f},
+ {0xB2, {0x00, 0x80, 0x80, 0xAE, 0x0A, 0x0E, 0x75, 0x11, 0x00, 0x00, 0x00}, 0x0b},
+ {0xB4, {0x00, 0xFF, 0x04, 0xA4, 0x02, 0xA0, 0x00, 0x00, 0x10, 0x00, 0x00, 0x02,
+ 0x00, 0x24, 0x02, 0x04, 0x0A, 0x21, 0x03, 0x00, 0x00, 0x08, 0xA6, 0x88,
+ 0x04, 0xA4, 0x02, 0xA0, 0x00, 0x00, 0x10, 0x00, 0x00, 0x02, 0x00, 0x24,
+ 0x02, 0x04, 0x0A, 0x00, 0x00, 0x08, 0xA6, 0x00, 0x08, 0x11}, 0x2e},
+ {0xD3, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
+ 0x18, 0x32, 0x10, 0x09, 0x00, 0x09, 0x32,
+ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x11, 0x00, 0x02, 0x02, 0x03, 0x00, 0x00, 0x00, 0x0A,
+ 0x40}, 0x21},
+ {0xD5, {0x18, 0x18, 0x18, 0x18, 0x21, 0x20, 0x18, 0x18, 0x19, 0x19, 0x19,
+ 0x19, 0x18, 0x18, 0x18, 0x18, 0x03, 0x02, 0x01, 0x00, 0x2F, 0x2F,
+ 0x30, 0x30, 0x31, 0x31, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18}, 0x20},
+ {0xD6, {0x18, 0x18, 0x18, 0x18, 0x20, 0x21, 0x19, 0x19, 0x18, 0x18, 0x19,
+ 0x19, 0x18, 0x18, 0x18, 0x18, 0x00, 0x01, 0x02, 0x03, 0x2F, 0x2F,
+ 0x30, 0x30, 0x31, 0x31, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18}, 0x20},
+ {0xD8, {0x0A, 0xBE, 0xFA, 0xA0, 0x0A, 0xBE, 0xFA, 0xA0}, 0x08},
+ {0xBD, {0x01}, 0x01},
+ {0xD8, {0x0F, 0xFF, 0xFF, 0xE0, 0x0F, 0xFF, 0xFF, 0xE0}, 0x08},
+ {0xBD, {0x02}, 0x01},
+ {0xD8, {0x0F, 0xFF, 0xFF, 0xE0, 0x0F, 0xFF, 0xFF, 0xE0}, 0x08},
+ {0xBD, {0x00}, 0x01},
+ {0xE0, {0x01, 0x35, 0x41, 0x3B, 0x79, 0x81, 0x8C, 0x85, 0x8E,
+ 0x95, 0x9B, 0xA0, 0xA4, 0xAB, 0xB1, 0xB3, 0xB7, 0xC5, 0xBD, 0xC5,
+ 0xB6, 0xC2, 0xC2, 0x62, 0x5D, 0x66, 0x73, 0x01, 0x35, 0x41, 0x3B,
+ 0x79, 0x81, 0x8C, 0x85, 0x8E, 0x95, 0x9B, 0xA0, 0xA4, 0xAB, 0xB1,
+ 0xB3, 0xB7, 0xB5, 0xBD, 0xC5, 0xB6, 0xC2, 0xC2, 0x62, 0x5D, 0x66,
+ 0x73}, 0x36},
+ {0xB6, {0x97, 0x97}, 0x02},
+ {0xCC, {0xC8}, 0x02},
+ {0xBF, {0x40, 0x41, 0x50, 0x19}, 0x04},
+ {0xC6, {0xFF, 0xF9}, 0x02},
+ {0xC0, {0x25, 0x5A}, 0x02},
};
/* Only the CMD1 User Command set is documented */
-static const struct khadas_ts050_panel_cmd init_code[] = {
+static const struct khadas_ts050_panel_cmd ts050_init_code[] = {
/* Select Unknown CMD Page (Undocumented) */
- {0xff, 0xee},
+ {0xff, {0xee}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x1f, 0x45},
- {0x24, 0x4f},
- {0x38, 0xc8},
- {0x39, 0x27},
- {0x1e, 0x77},
- {0x1d, 0x0f},
- {0x7e, 0x71},
- {0x7c, 0x03},
- {0xff, 0x00},
- {0xfb, 0x01},
- {0x35, 0x01},
+ {0xfb, {0x01}, 0x01},
+ {0x1f, {0x45}, 0x01},
+ {0x24, {0x4f}, 0x01},
+ {0x38, {0xc8}, 0x01},
+ {0x39, {0x27}, 0x01},
+ {0x1e, {0x77}, 0x01},
+ {0x1d, {0x0f}, 0x01},
+ {0x7e, {0x71}, 0x01},
+ {0x7c, {0x03}, 0x01},
+ {0xff, {0x00}, 0x01},
+ {0xfb, {0x01}, 0x01},
+ {0x35, {0x01}, 0x01},
/* Select CMD2 Page0 (Undocumented) */
- {0xff, 0x01},
+ {0xff, {0x01}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x00, 0x01},
- {0x01, 0x55},
- {0x02, 0x40},
- {0x05, 0x40},
- {0x06, 0x4a},
- {0x07, 0x24},
- {0x08, 0x0c},
- {0x0b, 0x7d},
- {0x0c, 0x7d},
- {0x0e, 0xb0},
- {0x0f, 0xae},
- {0x11, 0x10},
- {0x12, 0x10},
- {0x13, 0x03},
- {0x14, 0x4a},
- {0x15, 0x12},
- {0x16, 0x12},
- {0x18, 0x00},
- {0x19, 0x77},
- {0x1a, 0x55},
- {0x1b, 0x13},
- {0x1c, 0x00},
- {0x1d, 0x00},
- {0x1e, 0x13},
- {0x1f, 0x00},
- {0x23, 0x00},
- {0x24, 0x00},
- {0x25, 0x00},
- {0x26, 0x00},
- {0x27, 0x00},
- {0x28, 0x00},
- {0x35, 0x00},
- {0x66, 0x00},
- {0x58, 0x82},
- {0x59, 0x02},
- {0x5a, 0x02},
- {0x5b, 0x02},
- {0x5c, 0x82},
- {0x5d, 0x82},
- {0x5e, 0x02},
- {0x5f, 0x02},
- {0x72, 0x31},
+ {0xfb, {0x01}, 0x01},
+ {0x00, {0x01}, 0x01},
+ {0x01, {0x55}, 0x01},
+ {0x02, {0x40}, 0x01},
+ {0x05, {0x40}, 0x01},
+ {0x06, {0x4a}, 0x01},
+ {0x07, {0x24}, 0x01},
+ {0x08, {0x0c}, 0x01},
+ {0x0b, {0x7d}, 0x01},
+ {0x0c, {0x7d}, 0x01},
+ {0x0e, {0xb0}, 0x01},
+ {0x0f, {0xae}, 0x01},
+ {0x11, {0x10}, 0x01},
+ {0x12, {0x10}, 0x01},
+ {0x13, {0x03}, 0x01},
+ {0x14, {0x4a}, 0x01},
+ {0x15, {0x12}, 0x01},
+ {0x16, {0x12}, 0x01},
+ {0x18, {0x00}, 0x01},
+ {0x19, {0x77}, 0x01},
+ {0x1a, {0x55}, 0x01},
+ {0x1b, {0x13}, 0x01},
+ {0x1c, {0x00}, 0x01},
+ {0x1d, {0x00}, 0x01},
+ {0x1e, {0x13}, 0x01},
+ {0x1f, {0x00}, 0x01},
+ {0x23, {0x00}, 0x01},
+ {0x24, {0x00}, 0x01},
+ {0x25, {0x00}, 0x01},
+ {0x26, {0x00}, 0x01},
+ {0x27, {0x00}, 0x01},
+ {0x28, {0x00}, 0x01},
+ {0x35, {0x00}, 0x01},
+ {0x66, {0x00}, 0x01},
+ {0x58, {0x82}, 0x01},
+ {0x59, {0x02}, 0x01},
+ {0x5a, {0x02}, 0x01},
+ {0x5b, {0x02}, 0x01},
+ {0x5c, {0x82}, 0x01},
+ {0x5d, {0x82}, 0x01},
+ {0x5e, {0x02}, 0x01},
+ {0x5f, {0x02}, 0x01},
+ {0x72, {0x31}, 0x01},
/* Select CMD2 Page4 (Undocumented) */
- {0xff, 0x05},
+ {0xff, {0x05}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x00, 0x01},
- {0x01, 0x0b},
- {0x02, 0x0c},
- {0x03, 0x09},
- {0x04, 0x0a},
- {0x05, 0x00},
- {0x06, 0x0f},
- {0x07, 0x10},
- {0x08, 0x00},
- {0x09, 0x00},
- {0x0a, 0x00},
- {0x0b, 0x00},
- {0x0c, 0x00},
- {0x0d, 0x13},
- {0x0e, 0x15},
- {0x0f, 0x17},
- {0x10, 0x01},
- {0x11, 0x0b},
- {0x12, 0x0c},
- {0x13, 0x09},
- {0x14, 0x0a},
- {0x15, 0x00},
- {0x16, 0x0f},
- {0x17, 0x10},
- {0x18, 0x00},
- {0x19, 0x00},
- {0x1a, 0x00},
- {0x1b, 0x00},
- {0x1c, 0x00},
- {0x1d, 0x13},
- {0x1e, 0x15},
- {0x1f, 0x17},
- {0x20, 0x00},
- {0x21, 0x03},
- {0x22, 0x01},
- {0x23, 0x40},
- {0x24, 0x40},
- {0x25, 0xed},
- {0x29, 0x58},
- {0x2a, 0x12},
- {0x2b, 0x01},
- {0x4b, 0x06},
- {0x4c, 0x11},
- {0x4d, 0x20},
- {0x4e, 0x02},
- {0x4f, 0x02},
- {0x50, 0x20},
- {0x51, 0x61},
- {0x52, 0x01},
- {0x53, 0x63},
- {0x54, 0x77},
- {0x55, 0xed},
- {0x5b, 0x00},
- {0x5c, 0x00},
- {0x5d, 0x00},
- {0x5e, 0x00},
- {0x5f, 0x15},
- {0x60, 0x75},
- {0x61, 0x00},
- {0x62, 0x00},
- {0x63, 0x00},
- {0x64, 0x00},
- {0x65, 0x00},
- {0x66, 0x00},
- {0x67, 0x00},
- {0x68, 0x04},
- {0x69, 0x00},
- {0x6a, 0x00},
- {0x6c, 0x40},
- {0x75, 0x01},
- {0x76, 0x01},
- {0x7a, 0x80},
- {0x7b, 0xa3},
- {0x7c, 0xd8},
- {0x7d, 0x60},
- {0x7f, 0x15},
- {0x80, 0x81},
- {0x83, 0x05},
- {0x93, 0x08},
- {0x94, 0x10},
- {0x8a, 0x00},
- {0x9b, 0x0f},
- {0xea, 0xff},
- {0xec, 0x00},
+ {0xfb, {0x01}, 0x01},
+ {0x00, {0x01}, 0x01},
+ {0x01, {0x0b}, 0x01},
+ {0x02, {0x0c}, 0x01},
+ {0x03, {0x09}, 0x01},
+ {0x04, {0x0a}, 0x01},
+ {0x05, {0x00}, 0x01},
+ {0x06, {0x0f}, 0x01},
+ {0x07, {0x10}, 0x01},
+ {0x08, {0x00}, 0x01},
+ {0x09, {0x00}, 0x01},
+ {0x0a, {0x00}, 0x01},
+ {0x0b, {0x00}, 0x01},
+ {0x0c, {0x00}, 0x01},
+ {0x0d, {0x13}, 0x01},
+ {0x0e, {0x15}, 0x01},
+ {0x0f, {0x17}, 0x01},
+ {0x10, {0x01}, 0x01},
+ {0x11, {0x0b}, 0x01},
+ {0x12, {0x0c}, 0x01},
+ {0x13, {0x09}, 0x01},
+ {0x14, {0x0a}, 0x01},
+ {0x15, {0x00}, 0x01},
+ {0x16, {0x0f}, 0x01},
+ {0x17, {0x10}, 0x01},
+ {0x18, {0x00}, 0x01},
+ {0x19, {0x00}, 0x01},
+ {0x1a, {0x00}, 0x01},
+ {0x1b, {0x00}, 0x01},
+ {0x1c, {0x00}, 0x01},
+ {0x1d, {0x13}, 0x01},
+ {0x1e, {0x15}, 0x01},
+ {0x1f, {0x17}, 0x01},
+ {0x20, {0x00}, 0x01},
+ {0x21, {0x03}, 0x01},
+ {0x22, {0x01}, 0x01},
+ {0x23, {0x40}, 0x01},
+ {0x24, {0x40}, 0x01},
+ {0x25, {0xed}, 0x01},
+ {0x29, {0x58}, 0x01},
+ {0x2a, {0x12}, 0x01},
+ {0x2b, {0x01}, 0x01},
+ {0x4b, {0x06}, 0x01},
+ {0x4c, {0x11}, 0x01},
+ {0x4d, {0x20}, 0x01},
+ {0x4e, {0x02}, 0x01},
+ {0x4f, {0x02}, 0x01},
+ {0x50, {0x20}, 0x01},
+ {0x51, {0x61}, 0x01},
+ {0x52, {0x01}, 0x01},
+ {0x53, {0x63}, 0x01},
+ {0x54, {0x77}, 0x01},
+ {0x55, {0xed}, 0x01},
+ {0x5b, {0x00}, 0x01},
+ {0x5c, {0x00}, 0x01},
+ {0x5d, {0x00}, 0x01},
+ {0x5e, {0x00}, 0x01},
+ {0x5f, {0x15}, 0x01},
+ {0x60, {0x75}, 0x01},
+ {0x61, {0x00}, 0x01},
+ {0x62, {0x00}, 0x01},
+ {0x63, {0x00}, 0x01},
+ {0x64, {0x00}, 0x01},
+ {0x65, {0x00}, 0x01},
+ {0x66, {0x00}, 0x01},
+ {0x67, {0x00}, 0x01},
+ {0x68, {0x04}, 0x01},
+ {0x69, {0x00}, 0x01},
+ {0x6a, {0x00}, 0x01},
+ {0x6c, {0x40}, 0x01},
+ {0x75, {0x01}, 0x01},
+ {0x76, {0x01}, 0x01},
+ {0x7a, {0x80}, 0x01},
+ {0x7b, {0xa3}, 0x01},
+ {0x7c, {0xd8}, 0x01},
+ {0x7d, {0x60}, 0x01},
+ {0x7f, {0x15}, 0x01},
+ {0x80, {0x81}, 0x01},
+ {0x83, {0x05}, 0x01},
+ {0x93, {0x08}, 0x01},
+ {0x94, {0x10}, 0x01},
+ {0x8a, {0x00}, 0x01},
+ {0x9b, {0x0f}, 0x01},
+ {0xea, {0xff}, 0x01},
+ {0xec, {0x00}, 0x01},
/* Select CMD2 Page0 (Undocumented) */
- {0xff, 0x01},
+ {0xff, {0x01}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x75, 0x00},
- {0x76, 0xdf},
- {0x77, 0x00},
- {0x78, 0xe4},
- {0x79, 0x00},
- {0x7a, 0xed},
- {0x7b, 0x00},
- {0x7c, 0xf6},
- {0x7d, 0x00},
- {0x7e, 0xff},
- {0x7f, 0x01},
- {0x80, 0x07},
- {0x81, 0x01},
- {0x82, 0x10},
- {0x83, 0x01},
- {0x84, 0x18},
- {0x85, 0x01},
- {0x86, 0x20},
- {0x87, 0x01},
- {0x88, 0x3d},
- {0x89, 0x01},
- {0x8a, 0x56},
- {0x8b, 0x01},
- {0x8c, 0x84},
- {0x8d, 0x01},
- {0x8e, 0xab},
- {0x8f, 0x01},
- {0x90, 0xec},
- {0x91, 0x02},
- {0x92, 0x22},
- {0x93, 0x02},
- {0x94, 0x23},
- {0x95, 0x02},
- {0x96, 0x55},
- {0x97, 0x02},
- {0x98, 0x8b},
- {0x99, 0x02},
- {0x9a, 0xaf},
- {0x9b, 0x02},
- {0x9c, 0xdf},
- {0x9d, 0x03},
- {0x9e, 0x01},
- {0x9f, 0x03},
- {0xa0, 0x2c},
- {0xa2, 0x03},
- {0xa3, 0x39},
- {0xa4, 0x03},
- {0xa5, 0x47},
- {0xa6, 0x03},
- {0xa7, 0x56},
- {0xa9, 0x03},
- {0xaa, 0x66},
- {0xab, 0x03},
- {0xac, 0x76},
- {0xad, 0x03},
- {0xae, 0x85},
- {0xaf, 0x03},
- {0xb0, 0x90},
- {0xb1, 0x03},
- {0xb2, 0xcb},
- {0xb3, 0x00},
- {0xb4, 0xdf},
- {0xb5, 0x00},
- {0xb6, 0xe4},
- {0xb7, 0x00},
- {0xb8, 0xed},
- {0xb9, 0x00},
- {0xba, 0xf6},
- {0xbb, 0x00},
- {0xbc, 0xff},
- {0xbd, 0x01},
- {0xbe, 0x07},
- {0xbf, 0x01},
- {0xc0, 0x10},
- {0xc1, 0x01},
- {0xc2, 0x18},
- {0xc3, 0x01},
- {0xc4, 0x20},
- {0xc5, 0x01},
- {0xc6, 0x3d},
- {0xc7, 0x01},
- {0xc8, 0x56},
- {0xc9, 0x01},
- {0xca, 0x84},
- {0xcb, 0x01},
- {0xcc, 0xab},
- {0xcd, 0x01},
- {0xce, 0xec},
- {0xcf, 0x02},
- {0xd0, 0x22},
- {0xd1, 0x02},
- {0xd2, 0x23},
- {0xd3, 0x02},
- {0xd4, 0x55},
- {0xd5, 0x02},
- {0xd6, 0x8b},
- {0xd7, 0x02},
- {0xd8, 0xaf},
- {0xd9, 0x02},
- {0xda, 0xdf},
- {0xdb, 0x03},
- {0xdc, 0x01},
- {0xdd, 0x03},
- {0xde, 0x2c},
- {0xdf, 0x03},
- {0xe0, 0x39},
- {0xe1, 0x03},
- {0xe2, 0x47},
- {0xe3, 0x03},
- {0xe4, 0x56},
- {0xe5, 0x03},
- {0xe6, 0x66},
- {0xe7, 0x03},
- {0xe8, 0x76},
- {0xe9, 0x03},
- {0xea, 0x85},
- {0xeb, 0x03},
- {0xec, 0x90},
- {0xed, 0x03},
- {0xee, 0xcb},
- {0xef, 0x00},
- {0xf0, 0xbb},
- {0xf1, 0x00},
- {0xf2, 0xc0},
- {0xf3, 0x00},
- {0xf4, 0xcc},
- {0xf5, 0x00},
- {0xf6, 0xd6},
- {0xf7, 0x00},
- {0xf8, 0xe1},
- {0xf9, 0x00},
- {0xfa, 0xea},
+ {0xfb, {0x01}, 0x01},
+ {0x75, {0x00}, 0x01},
+ {0x76, {0xdf}, 0x01},
+ {0x77, {0x00}, 0x01},
+ {0x78, {0xe4}, 0x01},
+ {0x79, {0x00}, 0x01},
+ {0x7a, {0xed}, 0x01},
+ {0x7b, {0x00}, 0x01},
+ {0x7c, {0xf6}, 0x01},
+ {0x7d, {0x00}, 0x01},
+ {0x7e, {0xff}, 0x01},
+ {0x7f, {0x01}, 0x01},
+ {0x80, {0x07}, 0x01},
+ {0x81, {0x01}, 0x01},
+ {0x82, {0x10}, 0x01},
+ {0x83, {0x01}, 0x01},
+ {0x84, {0x18}, 0x01},
+ {0x85, {0x01}, 0x01},
+ {0x86, {0x20}, 0x01},
+ {0x87, {0x01}, 0x01},
+ {0x88, {0x3d}, 0x01},
+ {0x89, {0x01}, 0x01},
+ {0x8a, {0x56}, 0x01},
+ {0x8b, {0x01}, 0x01},
+ {0x8c, {0x84}, 0x01},
+ {0x8d, {0x01}, 0x01},
+ {0x8e, {0xab}, 0x01},
+ {0x8f, {0x01}, 0x01},
+ {0x90, {0xec}, 0x01},
+ {0x91, {0x02}, 0x01},
+ {0x92, {0x22}, 0x01},
+ {0x93, {0x02}, 0x01},
+ {0x94, {0x23}, 0x01},
+ {0x95, {0x02}, 0x01},
+ {0x96, {0x55}, 0x01},
+ {0x97, {0x02}, 0x01},
+ {0x98, {0x8b}, 0x01},
+ {0x99, {0x02}, 0x01},
+ {0x9a, {0xaf}, 0x01},
+ {0x9b, {0x02}, 0x01},
+ {0x9c, {0xdf}, 0x01},
+ {0x9d, {0x03}, 0x01},
+ {0x9e, {0x01}, 0x01},
+ {0x9f, {0x03}, 0x01},
+ {0xa0, {0x2c}, 0x01},
+ {0xa2, {0x03}, 0x01},
+ {0xa3, {0x39}, 0x01},
+ {0xa4, {0x03}, 0x01},
+ {0xa5, {0x47}, 0x01},
+ {0xa6, {0x03}, 0x01},
+ {0xa7, {0x56}, 0x01},
+ {0xa9, {0x03}, 0x01},
+ {0xaa, {0x66}, 0x01},
+ {0xab, {0x03}, 0x01},
+ {0xac, {0x76}, 0x01},
+ {0xad, {0x03}, 0x01},
+ {0xae, {0x85}, 0x01},
+ {0xaf, {0x03}, 0x01},
+ {0xb0, {0x90}, 0x01},
+ {0xb1, {0x03}, 0x01},
+ {0xb2, {0xcb}, 0x01},
+ {0xb3, {0x00}, 0x01},
+ {0xb4, {0xdf}, 0x01},
+ {0xb5, {0x00}, 0x01},
+ {0xb6, {0xe4}, 0x01},
+ {0xb7, {0x00}, 0x01},
+ {0xb8, {0xed}, 0x01},
+ {0xb9, {0x00}, 0x01},
+ {0xba, {0xf6}, 0x01},
+ {0xbb, {0x00}, 0x01},
+ {0xbc, {0xff}, 0x01},
+ {0xbd, {0x01}, 0x01},
+ {0xbe, {0x07}, 0x01},
+ {0xbf, {0x01}, 0x01},
+ {0xc0, {0x10}, 0x01},
+ {0xc1, {0x01}, 0x01},
+ {0xc2, {0x18}, 0x01},
+ {0xc3, {0x01}, 0x01},
+ {0xc4, {0x20}, 0x01},
+ {0xc5, {0x01}, 0x01},
+ {0xc6, {0x3d}, 0x01},
+ {0xc7, {0x01}, 0x01},
+ {0xc8, {0x56}, 0x01},
+ {0xc9, {0x01}, 0x01},
+ {0xca, {0x84}, 0x01},
+ {0xcb, {0x01}, 0x01},
+ {0xcc, {0xab}, 0x01},
+ {0xcd, {0x01}, 0x01},
+ {0xce, {0xec}, 0x01},
+ {0xcf, {0x02}, 0x01},
+ {0xd0, {0x22}, 0x01},
+ {0xd1, {0x02}, 0x01},
+ {0xd2, {0x23}, 0x01},
+ {0xd3, {0x02}, 0x01},
+ {0xd4, {0x55}, 0x01},
+ {0xd5, {0x02}, 0x01},
+ {0xd6, {0x8b}, 0x01},
+ {0xd7, {0x02}, 0x01},
+ {0xd8, {0xaf}, 0x01},
+ {0xd9, {0x02}, 0x01},
+ {0xda, {0xdf}, 0x01},
+ {0xdb, {0x03}, 0x01},
+ {0xdc, {0x01}, 0x01},
+ {0xdd, {0x03}, 0x01},
+ {0xde, {0x2c}, 0x01},
+ {0xdf, {0x03}, 0x01},
+ {0xe0, {0x39}, 0x01},
+ {0xe1, {0x03}, 0x01},
+ {0xe2, {0x47}, 0x01},
+ {0xe3, {0x03}, 0x01},
+ {0xe4, {0x56}, 0x01},
+ {0xe5, {0x03}, 0x01},
+ {0xe6, {0x66}, 0x01},
+ {0xe7, {0x03}, 0x01},
+ {0xe8, {0x76}, 0x01},
+ {0xe9, {0x03}, 0x01},
+ {0xea, {0x85}, 0x01},
+ {0xeb, {0x03}, 0x01},
+ {0xec, {0x90}, 0x01},
+ {0xed, {0x03}, 0x01},
+ {0xee, {0xcb}, 0x01},
+ {0xef, {0x00}, 0x01},
+ {0xf0, {0xbb}, 0x01},
+ {0xf1, {0x00}, 0x01},
+ {0xf2, {0xc0}, 0x01},
+ {0xf3, {0x00}, 0x01},
+ {0xf4, {0xcc}, 0x01},
+ {0xf5, {0x00}, 0x01},
+ {0xf6, {0xd6}, 0x01},
+ {0xf7, {0x00}, 0x01},
+ {0xf8, {0xe1}, 0x01},
+ {0xf9, {0x00}, 0x01},
+ {0xfa, {0xea}, 0x01},
/* Select CMD2 Page2 (Undocumented) */
- {0xff, 0x02},
+ {0xff, {0x02}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
- {0x00, 0x00},
- {0x01, 0xf4},
- {0x02, 0x00},
- {0x03, 0xef},
- {0x04, 0x01},
- {0x05, 0x07},
- {0x06, 0x01},
- {0x07, 0x28},
- {0x08, 0x01},
- {0x09, 0x44},
- {0x0a, 0x01},
- {0x0b, 0x76},
- {0x0c, 0x01},
- {0x0d, 0xa0},
- {0x0e, 0x01},
- {0x0f, 0xe7},
- {0x10, 0x02},
- {0x11, 0x1f},
- {0x12, 0x02},
- {0x13, 0x22},
- {0x14, 0x02},
- {0x15, 0x54},
- {0x16, 0x02},
- {0x17, 0x8b},
- {0x18, 0x02},
- {0x19, 0xaf},
- {0x1a, 0x02},
- {0x1b, 0xe0},
- {0x1c, 0x03},
- {0x1d, 0x01},
- {0x1e, 0x03},
- {0x1f, 0x2d},
- {0x20, 0x03},
- {0x21, 0x39},
- {0x22, 0x03},
- {0x23, 0x47},
- {0x24, 0x03},
- {0x25, 0x57},
- {0x26, 0x03},
- {0x27, 0x65},
- {0x28, 0x03},
- {0x29, 0x77},
- {0x2a, 0x03},
- {0x2b, 0x85},
- {0x2d, 0x03},
- {0x2f, 0x8f},
- {0x30, 0x03},
- {0x31, 0xcb},
- {0x32, 0x00},
- {0x33, 0xbb},
- {0x34, 0x00},
- {0x35, 0xc0},
- {0x36, 0x00},
- {0x37, 0xcc},
- {0x38, 0x00},
- {0x39, 0xd6},
- {0x3a, 0x00},
- {0x3b, 0xe1},
- {0x3d, 0x00},
- {0x3f, 0xea},
- {0x40, 0x00},
- {0x41, 0xf4},
- {0x42, 0x00},
- {0x43, 0xfe},
- {0x44, 0x01},
- {0x45, 0x07},
- {0x46, 0x01},
- {0x47, 0x28},
- {0x48, 0x01},
- {0x49, 0x44},
- {0x4a, 0x01},
- {0x4b, 0x76},
- {0x4c, 0x01},
- {0x4d, 0xa0},
- {0x4e, 0x01},
- {0x4f, 0xe7},
- {0x50, 0x02},
- {0x51, 0x1f},
- {0x52, 0x02},
- {0x53, 0x22},
- {0x54, 0x02},
- {0x55, 0x54},
- {0x56, 0x02},
- {0x58, 0x8b},
- {0x59, 0x02},
- {0x5a, 0xaf},
- {0x5b, 0x02},
- {0x5c, 0xe0},
- {0x5d, 0x03},
- {0x5e, 0x01},
- {0x5f, 0x03},
- {0x60, 0x2d},
- {0x61, 0x03},
- {0x62, 0x39},
- {0x63, 0x03},
- {0x64, 0x47},
- {0x65, 0x03},
- {0x66, 0x57},
- {0x67, 0x03},
- {0x68, 0x65},
- {0x69, 0x03},
- {0x6a, 0x77},
- {0x6b, 0x03},
- {0x6c, 0x85},
- {0x6d, 0x03},
- {0x6e, 0x8f},
- {0x6f, 0x03},
- {0x70, 0xcb},
- {0x71, 0x00},
- {0x72, 0x00},
- {0x73, 0x00},
- {0x74, 0x21},
- {0x75, 0x00},
- {0x76, 0x4c},
- {0x77, 0x00},
- {0x78, 0x6b},
- {0x79, 0x00},
- {0x7a, 0x85},
- {0x7b, 0x00},
- {0x7c, 0x9a},
- {0x7d, 0x00},
- {0x7e, 0xad},
- {0x7f, 0x00},
- {0x80, 0xbe},
- {0x81, 0x00},
- {0x82, 0xcd},
- {0x83, 0x01},
- {0x84, 0x01},
- {0x85, 0x01},
- {0x86, 0x29},
- {0x87, 0x01},
- {0x88, 0x68},
- {0x89, 0x01},
- {0x8a, 0x98},
- {0x8b, 0x01},
- {0x8c, 0xe5},
- {0x8d, 0x02},
- {0x8e, 0x1e},
- {0x8f, 0x02},
- {0x90, 0x30},
- {0x91, 0x02},
- {0x92, 0x52},
- {0x93, 0x02},
- {0x94, 0x88},
- {0x95, 0x02},
- {0x96, 0xaa},
- {0x97, 0x02},
- {0x98, 0xd7},
- {0x99, 0x02},
- {0x9a, 0xf7},
- {0x9b, 0x03},
- {0x9c, 0x21},
- {0x9d, 0x03},
- {0x9e, 0x2e},
- {0x9f, 0x03},
- {0xa0, 0x3d},
- {0xa2, 0x03},
- {0xa3, 0x4c},
- {0xa4, 0x03},
- {0xa5, 0x5e},
- {0xa6, 0x03},
- {0xa7, 0x71},
- {0xa9, 0x03},
- {0xaa, 0x86},
- {0xab, 0x03},
- {0xac, 0x94},
- {0xad, 0x03},
- {0xae, 0xfa},
- {0xaf, 0x00},
- {0xb0, 0x00},
- {0xb1, 0x00},
- {0xb2, 0x21},
- {0xb3, 0x00},
- {0xb4, 0x4c},
- {0xb5, 0x00},
- {0xb6, 0x6b},
- {0xb7, 0x00},
- {0xb8, 0x85},
- {0xb9, 0x00},
- {0xba, 0x9a},
- {0xbb, 0x00},
- {0xbc, 0xad},
- {0xbd, 0x00},
- {0xbe, 0xbe},
- {0xbf, 0x00},
- {0xc0, 0xcd},
- {0xc1, 0x01},
- {0xc2, 0x01},
- {0xc3, 0x01},
- {0xc4, 0x29},
- {0xc5, 0x01},
- {0xc6, 0x68},
- {0xc7, 0x01},
- {0xc8, 0x98},
- {0xc9, 0x01},
- {0xca, 0xe5},
- {0xcb, 0x02},
- {0xcc, 0x1e},
- {0xcd, 0x02},
- {0xce, 0x20},
- {0xcf, 0x02},
- {0xd0, 0x52},
- {0xd1, 0x02},
- {0xd2, 0x88},
- {0xd3, 0x02},
- {0xd4, 0xaa},
- {0xd5, 0x02},
- {0xd6, 0xd7},
- {0xd7, 0x02},
- {0xd8, 0xf7},
- {0xd9, 0x03},
- {0xda, 0x21},
- {0xdb, 0x03},
- {0xdc, 0x2e},
- {0xdd, 0x03},
- {0xde, 0x3d},
- {0xdf, 0x03},
- {0xe0, 0x4c},
- {0xe1, 0x03},
- {0xe2, 0x5e},
- {0xe3, 0x03},
- {0xe4, 0x71},
- {0xe5, 0x03},
- {0xe6, 0x86},
- {0xe7, 0x03},
- {0xe8, 0x94},
- {0xe9, 0x03},
- {0xea, 0xfa},
+ {0xfb, {0x01}, 0x01},
+ {0x00, {0x00}, 0x01},
+ {0x01, {0xf4}, 0x01},
+ {0x02, {0x00}, 0x01},
+ {0x03, {0xef}, 0x01},
+ {0x04, {0x01}, 0x01},
+ {0x05, {0x07}, 0x01},
+ {0x06, {0x01}, 0x01},
+ {0x07, {0x28}, 0x01},
+ {0x08, {0x01}, 0x01},
+ {0x09, {0x44}, 0x01},
+ {0x0a, {0x01}, 0x01},
+ {0x0b, {0x76}, 0x01},
+ {0x0c, {0x01}, 0x01},
+ {0x0d, {0xa0}, 0x01},
+ {0x0e, {0x01}, 0x01},
+ {0x0f, {0xe7}, 0x01},
+ {0x10, {0x02}, 0x01},
+ {0x11, {0x1f}, 0x01},
+ {0x12, {0x02}, 0x01},
+ {0x13, {0x22}, 0x01},
+ {0x14, {0x02}, 0x01},
+ {0x15, {0x54}, 0x01},
+ {0x16, {0x02}, 0x01},
+ {0x17, {0x8b}, 0x01},
+ {0x18, {0x02}, 0x01},
+ {0x19, {0xaf}, 0x01},
+ {0x1a, {0x02}, 0x01},
+ {0x1b, {0xe0}, 0x01},
+ {0x1c, {0x03}, 0x01},
+ {0x1d, {0x01}, 0x01},
+ {0x1e, {0x03}, 0x01},
+ {0x1f, {0x2d}, 0x01},
+ {0x20, {0x03}, 0x01},
+ {0x21, {0x39}, 0x01},
+ {0x22, {0x03}, 0x01},
+ {0x23, {0x47}, 0x01},
+ {0x24, {0x03}, 0x01},
+ {0x25, {0x57}, 0x01},
+ {0x26, {0x03}, 0x01},
+ {0x27, {0x65}, 0x01},
+ {0x28, {0x03}, 0x01},
+ {0x29, {0x77}, 0x01},
+ {0x2a, {0x03}, 0x01},
+ {0x2b, {0x85}, 0x01},
+ {0x2d, {0x03}, 0x01},
+ {0x2f, {0x8f}, 0x01},
+ {0x30, {0x03}, 0x01},
+ {0x31, {0xcb}, 0x01},
+ {0x32, {0x00}, 0x01},
+ {0x33, {0xbb}, 0x01},
+ {0x34, {0x00}, 0x01},
+ {0x35, {0xc0}, 0x01},
+ {0x36, {0x00}, 0x01},
+ {0x37, {0xcc}, 0x01},
+ {0x38, {0x00}, 0x01},
+ {0x39, {0xd6}, 0x01},
+ {0x3a, {0x00}, 0x01},
+ {0x3b, {0xe1}, 0x01},
+ {0x3d, {0x00}, 0x01},
+ {0x3f, {0xea}, 0x01},
+ {0x40, {0x00}, 0x01},
+ {0x41, {0xf4}, 0x01},
+ {0x42, {0x00}, 0x01},
+ {0x43, {0xfe}, 0x01},
+ {0x44, {0x01}, 0x01},
+ {0x45, {0x07}, 0x01},
+ {0x46, {0x01}, 0x01},
+ {0x47, {0x28}, 0x01},
+ {0x48, {0x01}, 0x01},
+ {0x49, {0x44}, 0x01},
+ {0x4a, {0x01}, 0x01},
+ {0x4b, {0x76}, 0x01},
+ {0x4c, {0x01}, 0x01},
+ {0x4d, {0xa0}, 0x01},
+ {0x4e, {0x01}, 0x01},
+ {0x4f, {0xe7}, 0x01},
+ {0x50, {0x02}, 0x01},
+ {0x51, {0x1f}, 0x01},
+ {0x52, {0x02}, 0x01},
+ {0x53, {0x22}, 0x01},
+ {0x54, {0x02}, 0x01},
+ {0x55, {0x54}, 0x01},
+ {0x56, {0x02}, 0x01},
+ {0x58, {0x8b}, 0x01},
+ {0x59, {0x02}, 0x01},
+ {0x5a, {0xaf}, 0x01},
+ {0x5b, {0x02}, 0x01},
+ {0x5c, {0xe0}, 0x01},
+ {0x5d, {0x03}, 0x01},
+ {0x5e, {0x01}, 0x01},
+ {0x5f, {0x03}, 0x01},
+ {0x60, {0x2d}, 0x01},
+ {0x61, {0x03}, 0x01},
+ {0x62, {0x39}, 0x01},
+ {0x63, {0x03}, 0x01},
+ {0x64, {0x47}, 0x01},
+ {0x65, {0x03}, 0x01},
+ {0x66, {0x57}, 0x01},
+ {0x67, {0x03}, 0x01},
+ {0x68, {0x65}, 0x01},
+ {0x69, {0x03}, 0x01},
+ {0x6a, {0x77}, 0x01},
+ {0x6b, {0x03}, 0x01},
+ {0x6c, {0x85}, 0x01},
+ {0x6d, {0x03}, 0x01},
+ {0x6e, {0x8f}, 0x01},
+ {0x6f, {0x03}, 0x01},
+ {0x70, {0xcb}, 0x01},
+ {0x71, {0x00}, 0x01},
+ {0x72, {0x00}, 0x01},
+ {0x73, {0x00}, 0x01},
+ {0x74, {0x21}, 0x01},
+ {0x75, {0x00}, 0x01},
+ {0x76, {0x4c}, 0x01},
+ {0x77, {0x00}, 0x01},
+ {0x78, {0x6b}, 0x01},
+ {0x79, {0x00}, 0x01},
+ {0x7a, {0x85}, 0x01},
+ {0x7b, {0x00}, 0x01},
+ {0x7c, {0x9a}, 0x01},
+ {0x7d, {0x00}, 0x01},
+ {0x7e, {0xad}, 0x01},
+ {0x7f, {0x00}, 0x01},
+ {0x80, {0xbe}, 0x01},
+ {0x81, {0x00}, 0x01},
+ {0x82, {0xcd}, 0x01},
+ {0x83, {0x01}, 0x01},
+ {0x84, {0x01}, 0x01},
+ {0x85, {0x01}, 0x01},
+ {0x86, {0x29}, 0x01},
+ {0x87, {0x01}, 0x01},
+ {0x88, {0x68}, 0x01},
+ {0x89, {0x01}, 0x01},
+ {0x8a, {0x98}, 0x01},
+ {0x8b, {0x01}, 0x01},
+ {0x8c, {0xe5}, 0x01},
+ {0x8d, {0x02}, 0x01},
+ {0x8e, {0x1e}, 0x01},
+ {0x8f, {0x02}, 0x01},
+ {0x90, {0x30}, 0x01},
+ {0x91, {0x02}, 0x01},
+ {0x92, {0x52}, 0x01},
+ {0x93, {0x02}, 0x01},
+ {0x94, {0x88}, 0x01},
+ {0x95, {0x02}, 0x01},
+ {0x96, {0xaa}, 0x01},
+ {0x97, {0x02}, 0x01},
+ {0x98, {0xd7}, 0x01},
+ {0x99, {0x02}, 0x01},
+ {0x9a, {0xf7}, 0x01},
+ {0x9b, {0x03}, 0x01},
+ {0x9c, {0x21}, 0x01},
+ {0x9d, {0x03}, 0x01},
+ {0x9e, {0x2e}, 0x01},
+ {0x9f, {0x03}, 0x01},
+ {0xa0, {0x3d}, 0x01},
+ {0xa2, {0x03}, 0x01},
+ {0xa3, {0x4c}, 0x01},
+ {0xa4, {0x03}, 0x01},
+ {0xa5, {0x5e}, 0x01},
+ {0xa6, {0x03}, 0x01},
+ {0xa7, {0x71}, 0x01},
+ {0xa9, {0x03}, 0x01},
+ {0xaa, {0x86}, 0x01},
+ {0xab, {0x03}, 0x01},
+ {0xac, {0x94}, 0x01},
+ {0xad, {0x03}, 0x01},
+ {0xae, {0xfa}, 0x01},
+ {0xaf, {0x00}, 0x01},
+ {0xb0, {0x00}, 0x01},
+ {0xb1, {0x00}, 0x01},
+ {0xb2, {0x21}, 0x01},
+ {0xb3, {0x00}, 0x01},
+ {0xb4, {0x4c}, 0x01},
+ {0xb5, {0x00}, 0x01},
+ {0xb6, {0x6b}, 0x01},
+ {0xb7, {0x00}, 0x01},
+ {0xb8, {0x85}, 0x01},
+ {0xb9, {0x00}, 0x01},
+ {0xba, {0x9a}, 0x01},
+ {0xbb, {0x00}, 0x01},
+ {0xbc, {0xad}, 0x01},
+ {0xbd, {0x00}, 0x01},
+ {0xbe, {0xbe}, 0x01},
+ {0xbf, {0x00}, 0x01},
+ {0xc0, {0xcd}, 0x01},
+ {0xc1, {0x01}, 0x01},
+ {0xc2, {0x01}, 0x01},
+ {0xc3, {0x01}, 0x01},
+ {0xc4, {0x29}, 0x01},
+ {0xc5, {0x01}, 0x01},
+ {0xc6, {0x68}, 0x01},
+ {0xc7, {0x01}, 0x01},
+ {0xc8, {0x98}, 0x01},
+ {0xc9, {0x01}, 0x01},
+ {0xca, {0xe5}, 0x01},
+ {0xcb, {0x02}, 0x01},
+ {0xcc, {0x1e}, 0x01},
+ {0xcd, {0x02}, 0x01},
+ {0xce, {0x20}, 0x01},
+ {0xcf, {0x02}, 0x01},
+ {0xd0, {0x52}, 0x01},
+ {0xd1, {0x02}, 0x01},
+ {0xd2, {0x88}, 0x01},
+ {0xd3, {0x02}, 0x01},
+ {0xd4, {0xaa}, 0x01},
+ {0xd5, {0x02}, 0x01},
+ {0xd6, {0xd7}, 0x01},
+ {0xd7, {0x02}, 0x01},
+ {0xd8, {0xf7}, 0x01},
+ {0xd9, {0x03}, 0x01},
+ {0xda, {0x21}, 0x01},
+ {0xdb, {0x03}, 0x01},
+ {0xdc, {0x2e}, 0x01},
+ {0xdd, {0x03}, 0x01},
+ {0xde, {0x3d}, 0x01},
+ {0xdf, {0x03}, 0x01},
+ {0xe0, {0x4c}, 0x01},
+ {0xe1, {0x03}, 0x01},
+ {0xe2, {0x5e}, 0x01},
+ {0xe3, {0x03}, 0x01},
+ {0xe4, {0x71}, 0x01},
+ {0xe5, {0x03}, 0x01},
+ {0xe6, {0x86}, 0x01},
+ {0xe7, {0x03}, 0x01},
+ {0xe8, {0x94}, 0x01},
+ {0xe9, {0x03}, 0x01},
+ {0xea, {0xfa}, 0x01},
/* Select CMD2 Page0 (Undocumented) */
- {0xff, 0x01},
+ {0xff, {0x01}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
+ {0xfb, {0x01}, 0x01},
/* Select CMD2 Page1 (Undocumented) */
- {0xff, 0x02},
+ {0xff, {0x02}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
+ {0xfb, {0x01}, 0x01},
/* Select CMD2 Page3 (Undocumented) */
- {0xff, 0x04},
+ {0xff, {0x04}, 0x01},
/* Reload CMD1: Don't reload default value to register */
- {0xfb, 0x01},
+ {0xfb, {0x01}, 0x01},
/* Select CMD1 */
- {0xff, 0x00},
- {0xd3, 0x22}, /* RGBMIPICTRL: VSYNC back porch = 34 */
- {0xd4, 0x04}, /* RGBMIPICTRL: VSYNC front porch = 4 */
+ {0xff, {0x00}, 0x01},
+ {0xd3, {0x22}, 0x01}, /* RGBMIPICTRL: VSYNC back porch = 34 */
+ {0xd4, {0x04}, 0x01}, /* RGBMIPICTRL: VSYNC front porch = 4 */
+};
+
+struct khadas_ts050_panel_data ts050_panel_data = {
+ .init_code = (struct khadas_ts050_panel_cmd *)ts050_init_code,
+ .len = ARRAY_SIZE(ts050_init_code)
+};
+
+struct khadas_ts050_panel_data ts050v2_panel_data = {
+ .init_code = (struct khadas_ts050_panel_cmd *)ts050v2_init_code,
+ .len = ARRAY_SIZE(ts050v2_init_code)
};
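
For reference, the {cmd, {data...}, size} initializers above imply a widened command record; a minimal sketch of the presumed layout, with the array bound an assumption sized to the longest entry:

/* Presumed layout behind the {cmd, {data...}, size} entries above. */
struct khadas_ts050_panel_cmd {
	u8 cmd;		/* register/DCS command byte */
	u8 data[55];	/* payload; bound assumed, must fit the longest entry */
	u8 size;	/* number of valid bytes in data[] */
};

The prepare loop below then passes init_code[i].size to mipi_dsi_dcs_write() instead of the previous fixed length of 1.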
static inline
@@ -613,10 +671,11 @@ static int khadas_ts050_panel_prepare(struct drm_panel *panel)
msleep(100);
- for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+ for (i = 0; i < khadas_ts050->panel_data->len; i++) {
err = mipi_dsi_dcs_write(khadas_ts050->link,
- init_code[i].cmd,
- &init_code[i].data, 1);
+ khadas_ts050->panel_data->init_code[i].cmd,
+ &khadas_ts050->panel_data->init_code[i].data,
+ khadas_ts050->panel_data->init_code[i].size);
if (err < 0) {
dev_err(panel->dev, "failed write cmds: %d\n", err);
goto poweroff;
@@ -762,7 +821,8 @@ static const struct drm_panel_funcs khadas_ts050_panel_funcs = {
};
static const struct of_device_id khadas_ts050_of_match[] = {
- { .compatible = "khadas,ts050", },
+ { .compatible = "khadas,ts050", .data = &ts050_panel_data, },
+ { .compatible = "khadas,ts050v2", .data = &ts050v2_panel_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, khadas_ts050_of_match);
@@ -806,6 +866,13 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
struct khadas_ts050_panel *khadas_ts050;
int err;
+ const void *data = of_device_get_match_data(&dsi->dev);
+
+ if (!data) {
+ dev_err(&dsi->dev, "No matching data\n");
+ return -ENODEV;
+ }
+
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
@@ -816,6 +883,7 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
if (!khadas_ts050)
return -ENOMEM;
+ khadas_ts050->panel_data = (struct khadas_ts050_panel_data *)data;
mipi_dsi_set_drvdata(dsi, khadas_ts050);
khadas_ts050->link = dsi;
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 9d87cc1a357e..1a26205701b5 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -295,8 +295,6 @@ static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx)
mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef);
mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x29);
ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
if (ret < 0) {
@@ -326,7 +324,8 @@ static const struct drm_display_mode ltk050h3148w_mode = {
static const struct ltk050h3146w_desc ltk050h3148w_data = {
.mode = &ltk050h3148w_mode,
.init = ltk050h3148w_init_sequence,
- .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_VIDEO_BURST,
};
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c
new file mode 100644
index 000000000000..115f4702d59f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2024 Linaro Ltd
+ * Author: Sumit Semwal <sumit.semwal@linaro.org>
+ * Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+
+#define NUM_SUPPLIES 2
+
+struct sw43408_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+
+ struct regulator_bulk_data supplies[NUM_SUPPLIES];
+
+ struct gpio_desc *reset_gpio;
+
+ struct drm_dsc_config dsc;
+};
+
+static inline struct sw43408_panel *to_panel_info(struct drm_panel *panel)
+{
+ return container_of(panel, struct sw43408_panel, base);
+}
+
+static int sw43408_unprepare(struct drm_panel *panel)
+{
+ struct sw43408_panel *ctx = to_panel_info(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->link);
+ if (ret < 0)
+ dev_err(panel->dev, "set_display_off cmd failed ret = %d\n", ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->link);
+ if (ret < 0)
+ dev_err(panel->dev, "enter_sleep cmd failed ret = %d\n", ret);
+
+ msleep(100);
+
+ gpiod_set_value(ctx->reset_gpio, 1);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int sw43408_program(struct drm_panel *panel)
+{
+ struct sw43408_panel *ctx = to_panel_info(panel);
+ struct drm_dsc_picture_parameter_set pps;
+
+ mipi_dsi_dcs_write_seq(ctx->link, MIPI_DCS_SET_GAMMA_CURVE, 0x02);
+
+ mipi_dsi_dcs_set_tear_on(ctx->link, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ mipi_dsi_dcs_write_seq(ctx->link, 0x53, 0x0c, 0x30);
+ mipi_dsi_dcs_write_seq(ctx->link, 0x55, 0x00, 0x70, 0xdf, 0x00, 0x70, 0xdf);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xf7, 0x01, 0x49, 0x0c);
+
+ mipi_dsi_dcs_exit_sleep_mode(ctx->link);
+
+ msleep(135);
+
+ /* The compression mode is enabled only after the PPS is set, below */
+
+ mipi_dsi_dcs_write_seq(ctx->link, 0xb0, 0xac);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xe5,
+ 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x0e, 0x10);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xb5,
+ 0x75, 0x60, 0x2d, 0x5d, 0x80, 0x00, 0x0a, 0x0b,
+ 0x00, 0x05, 0x0b, 0x00, 0x80, 0x0d, 0x0e, 0x40,
+ 0x00, 0x0c, 0x00, 0x16, 0x00, 0xb8, 0x00, 0x80,
+ 0x0d, 0x0e, 0x40, 0x00, 0x0c, 0x00, 0x16, 0x00,
+ 0xb8, 0x00, 0x81, 0x00, 0x03, 0x03, 0x03, 0x01,
+ 0x01);
+ msleep(85);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xcd,
+ 0x00, 0x00, 0x00, 0x19, 0x19, 0x19, 0x19, 0x19,
+ 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19,
+ 0x16, 0x16);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xcb, 0x80, 0x5c, 0x07, 0x03, 0x28);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xc0, 0x02, 0x02, 0x0f);
+ mipi_dsi_dcs_write_seq(ctx->link, 0x55, 0x04, 0x61, 0xdb, 0x04, 0x70, 0xdb);
+ mipi_dsi_dcs_write_seq(ctx->link, 0xb0, 0xca);
+
+ mipi_dsi_dcs_set_display_on(ctx->link);
+
+ msleep(50);
+
+ ctx->link->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ drm_dsc_pps_payload_pack(&pps, ctx->link->dsc);
+ mipi_dsi_picture_parameter_set(ctx->link, &pps);
+
+ ctx->link->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ /*
+ * This panel uses PPS selectors with offset:
+ * PPS 1 if pps_identifier is 0
+ * PPS 2 if pps_identifier is 1
+ */
+ mipi_dsi_compression_mode_ext(ctx->link, true,
+ MIPI_DSI_COMPRESSION_DSC, 1);
+
+ return 0;
+}
+
+static int sw43408_prepare(struct drm_panel *panel)
+{
+ struct sw43408_panel *ctx = to_panel_info(panel);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(5000, 6000);
+
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(9000, 10000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(9000, 10000);
+
+ ret = sw43408_program(panel);
+ if (ret)
+ goto poweroff;
+
+ return 0;
+
+poweroff:
+ gpiod_set_value(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+}
+
+static const struct drm_display_mode sw43408_mode = {
+ .clock = (1080 + 20 + 32 + 20) * (2160 + 20 + 4 + 20) * 60 / 1000,
+
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 20,
+ .hsync_end = 1080 + 20 + 32,
+ .htotal = 1080 + 20 + 32 + 20,
+
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 20,
+ .vsync_end = 2160 + 20 + 4,
+ .vtotal = 2160 + 20 + 4 + 20,
+
+ .width_mm = 62,
+ .height_mm = 124,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int sw43408_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &sw43408_mode);
+}
+
+static int sw43408_backlight_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+
+ return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+}
+
+const struct backlight_ops sw43408_backlight_ops = {
+ .update_status = sw43408_backlight_update_status,
+};
+
+static int sw43408_backlight_init(struct sw43408_panel *ctx)
+{
+ struct device *dev = &ctx->link->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_PLATFORM,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+
+ ctx->base.backlight = devm_backlight_device_register(dev, dev_name(dev), dev,
+ ctx->link,
+ &sw43408_backlight_ops,
+ &props);
+
+ if (IS_ERR(ctx->base.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->base.backlight),
+ "Failed to create backlight\n");
+
+ return 0;
+}
+
+static const struct drm_panel_funcs sw43408_funcs = {
+ .unprepare = sw43408_unprepare,
+ .prepare = sw43408_prepare,
+ .get_modes = sw43408_get_modes,
+};
+
+static const struct of_device_id sw43408_of_match[] = {
+ { .compatible = "lg,sw43408", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sw43408_of_match);
+
+static int sw43408_add(struct sw43408_panel *ctx)
+{
+ struct device *dev = &ctx->link->dev;
+ int ret;
+
+ ctx->supplies[0].supply = "vddi"; /* 1.88 V */
+ ctx->supplies[0].init_load_uA = 62000;
+ ctx->supplies[1].supply = "vpnl"; /* 3.0 V */
+ ctx->supplies[1].init_load_uA = 857000;
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ return dev_err_probe(dev, ret, "cannot get reset gpio\n");
+ }
+
+ ret = sw43408_backlight_init(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->base.prepare_prev_first = true;
+
+ drm_panel_init(&ctx->base, dev, &sw43408_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ drm_panel_add(&ctx->base);
+ return ret;
+}
+
+static int sw43408_probe(struct mipi_dsi_device *dsi)
+{
+ struct sw43408_panel *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ dsi->mode_flags = MIPI_DSI_MODE_LPM;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ ctx->link = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ret = sw43408_add(ctx);
+ if (ret < 0)
+ return ret;
+
+ /* The panel works only in DSC mode. Set the DSC params. */
+ ctx->dsc.dsc_version_major = 0x1;
+ ctx->dsc.dsc_version_minor = 0x1;
+
+ /* slice_count * slice_width == width */
+ ctx->dsc.slice_height = 16;
+ ctx->dsc.slice_width = 540;
+ ctx->dsc.slice_count = 2;
+ ctx->dsc.bits_per_component = 8;
+ ctx->dsc.bits_per_pixel = 8 << 4;
+ ctx->dsc.block_pred_enable = true;
+
+ dsi->dsc = &ctx->dsc;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static void sw43408_remove(struct mipi_dsi_device *dsi)
+{
+ struct sw43408_panel *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = sw43408_unprepare(&ctx->base);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to unprepare panel: %d\n", ret);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->base);
+}
+
+static struct mipi_dsi_driver sw43408_driver = {
+ .driver = {
+ .name = "panel-lg-sw43408",
+ .of_match_table = sw43408_of_match,
+ },
+ .probe = sw43408_probe,
+ .remove = sw43408_remove,
+};
+module_mipi_dsi_driver(sw43408_driver);
+
+MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>");
+MODULE_DESCRIPTION("LG SW436408 MIPI-DSI LED panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 648ce9201426..028fdac293f7 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -556,10 +556,8 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
}
dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
of_node_put(dsi_r);
- if (!dsi_r_host) {
- dev_err(dev, "Cannot get secondary DSI host\n");
- return -EPROBE_DEFER;
- }
+ if (!dsi_r_host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
if (!nt->dsi[1]) {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index 33fb3d715e54..3886372415c2 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -605,21 +605,16 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
struct device *dev = &pinfo->link->dev;
int i, ret;
- for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++)
+ for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) {
pinfo->supplies[i].supply = nt36672a_regulator_names[i];
+ pinfo->supplies[i].init_load_uA = nt36672a_regulator_enable_loads[i];
+ }
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pinfo->supplies),
pinfo->supplies);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) {
- ret = regulator_set_load(pinfo->supplies[i].consumer,
- nt36672a_regulator_enable_loads[i]);
- if (ret)
- return dev_err_probe(dev, ret, "failed to set regulator enable loads\n");
- }
-
pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(pinfo->reset_gpio))
return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio),
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index cb7406d74466..20b7bfe4aa12 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -25,12 +25,6 @@ static const unsigned long regulator_enable_loads[] = {
100000,
};
-static const unsigned long regulator_disable_loads[] = {
- 80,
- 100,
- 100,
-};
-
struct panel_desc {
const struct drm_display_mode *display_mode;
u32 width_mm;
@@ -349,17 +343,7 @@ static int nt36672e_1080x2408_60hz_init(struct mipi_dsi_device *dsi)
static int nt36672e_power_on(struct nt36672e_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
- ret = regulator_set_load(ctx->supplies[i].consumer,
- regulator_enable_loads[i]);
- if (ret) {
- dev_err(&dsi->dev, "regulator set load failed for supply %s: %d\n",
- ctx->supplies[i].supply, ret);
- return ret;
- }
- }
+ int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret < 0) {
@@ -385,20 +369,9 @@ static int nt36672e_power_off(struct nt36672e_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
int ret = 0;
- int i;
gpiod_set_value(ctx->reset_gpio, 0);
- for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
- ret = regulator_set_load(ctx->supplies[i].consumer,
- regulator_disable_loads[i]);
- if (ret) {
- dev_err(&dsi->dev, "regulator set load failed for supply %s: %d\n",
- ctx->supplies[i].supply, ret);
- return ret;
- }
- }
-
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret)
dev_err(&dsi->dev, "regulator bulk disable failed: %d\n", ret);
@@ -567,8 +540,10 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
return -ENODEV;
}
- for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
ctx->supplies[i].supply = regulator_names[i];
+ ctx->supplies[i].init_load_uA = regulator_enable_loads[i];
+ }
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
ctx->supplies);
@@ -614,8 +589,6 @@ static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
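
The two Novatek simplifications above rest on the init_load_uA member of struct regulator_bulk_data: when it is set before devm_regulator_bulk_get(), the regulator core applies the load at get time, so the hand-rolled regulator_set_load() loops become redundant. A condensed sketch of the idiom, with illustrative supply names and loads:

#include <linux/regulator/consumer.h>

/* Sketch: declare loads up front instead of regulator_set_load() loops. */
static const char * const example_supplies[] = { "vddio", "vddpos" };
static const unsigned int example_loads_uA[] = { 62000, 100000 };

static int example_get_regulators(struct device *dev,
				  struct regulator_bulk_data *supplies)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_supplies); i++) {
		supplies[i].supply = example_supplies[i];
		supplies[i].init_load_uA = example_loads_uA[i];
	}

	return devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				       supplies);
}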
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm69380.c b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
new file mode 100644
index 000000000000..4dca6802faef
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree.
+ * Copyright (c) 2024 David Wronek <david@mainlining.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct rm69380_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi[2];
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+};
+
+static inline
+struct rm69380_panel *to_rm69380_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct rm69380_panel, panel);
+}
+
+static void rm69380_reset(struct rm69380_panel *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(15000, 16000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(30);
+}
+
+static int rm69380_on(struct rm69380_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi[0];
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ if (ctx->dsi[1])
+ ctx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd4);
+ mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd0);
+ mipi_dsi_dcs_write_seq(dsi, 0x48, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x26);
+ mipi_dsi_dcs_write_seq(dsi, 0x75, 0x3f);
+ mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x1a);
+ mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x28);
+ mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x08);
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear on: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(36);
+
+ return 0;
+}
+
+static int rm69380_off(struct rm69380_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi[0];
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ if (ctx->dsi[1])
+ ctx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(35);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ return 0;
+}
+
+static int rm69380_prepare(struct drm_panel *panel)
+{
+ struct rm69380_panel *ctx = to_rm69380_panel(panel);
+ struct device *dev = &ctx->dsi[0]->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ rm69380_reset(ctx);
+
+ ret = rm69380_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rm69380_unprepare(struct drm_panel *panel)
+{
+ struct rm69380_panel *ctx = to_rm69380_panel(panel);
+ struct device *dev = &ctx->dsi[0]->dev;
+ int ret;
+
+ ret = rm69380_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode rm69380_mode = {
+ .clock = (2560 + 32 + 12 + 38) * (1600 + 20 + 4 + 8) * 90 / 1000,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 32,
+ .hsync_end = 2560 + 32 + 12,
+ .htotal = 2560 + 32 + 12 + 38,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 20,
+ .vsync_end = 1600 + 20 + 4,
+ .vtotal = 1600 + 20 + 4 + 8,
+ .width_mm = 248,
+ .height_mm = 155,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int rm69380_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &rm69380_mode);
+}
+
+static const struct drm_panel_funcs rm69380_panel_funcs = {
+ .prepare = rm69380_prepare,
+ .unprepare = rm69380_unprepare,
+ .get_modes = rm69380_get_modes,
+};
+
+static int rm69380_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int rm69380_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness;
+}
+
+static const struct backlight_ops rm69380_bl_ops = {
+ .update_status = rm69380_bl_update_status,
+ .get_brightness = rm69380_bl_get_brightness,
+};
+
+static struct backlight_device *
+rm69380_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 511,
+ .max_brightness = 2047,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &rm69380_bl_ops, &props);
+}
+
+static int rm69380_probe(struct mipi_dsi_device *dsi)
+{
+ struct mipi_dsi_host *dsi_sec_host;
+ struct rm69380_panel *ctx;
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi_sec;
+ int ret, i;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vddio";
+ ctx->supplies[1].supply = "avdd";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ dsi_sec = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+
+ if (dsi_sec) {
+ const struct mipi_dsi_device_info info = { "RM69380 DSI1", 0,
+ dsi_sec };
+
+ dsi_sec_host = of_find_mipi_dsi_host_by_node(dsi_sec);
+ of_node_put(dsi_sec);
+ if (!dsi_sec_host)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "Cannot get secondary DSI host\n");
+
+ ctx->dsi[1] =
+ devm_mipi_dsi_device_register_full(dev, dsi_sec_host, &info);
+ if (IS_ERR(ctx->dsi[1]))
+ return dev_err_probe(dev, PTR_ERR(ctx->dsi[1]),
+ "Cannot get secondary DSI node\n");
+
+ mipi_dsi_set_drvdata(ctx->dsi[1], ctx);
+ }
+
+ ctx->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ drm_panel_init(&ctx->panel, dev, &rm69380_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = rm69380_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+ if (!ctx->dsi[i])
+ continue;
+
+ dev_dbg(&ctx->dsi[i]->dev, "Binding DSI %d\n", i);
+
+ ctx->dsi[i]->lanes = 4;
+ ctx->dsi[i]->format = MIPI_DSI_FMT_RGB888;
+ ctx->dsi[i]->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ret = devm_mipi_dsi_attach(dev, ctx->dsi[i]);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret,
+ "Failed to attach to DSI%d\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static void rm69380_remove(struct mipi_dsi_device *dsi)
+{
+ struct rm69380_panel *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id rm69380_of_match[] = {
+ { .compatible = "lenovo,j716f-edo-rm69380" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rm69380_of_match);
+
+static struct mipi_dsi_driver rm69380_panel_driver = {
+ .probe = rm69380_probe,
+ .remove = rm69380_remove,
+ .driver = {
+ .name = "panel-raydium-rm69380",
+ .of_match_table = rm69380_of_match,
+ },
+};
+module_mipi_dsi_driver(rm69380_panel_driver);
+
+MODULE_AUTHOR("David Wronek <david@mainlining.org");
+MODULE_DESCRIPTION("DRM driver for Raydium RM69380-equipped DSI panels");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 76c2a8f6718c..a9f0d214a900 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -36,7 +36,7 @@ struct atana33xc20_panel {
struct gpio_desc *el_on3_gpio;
struct drm_dp_aux *aux;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
ktime_t powered_off_time;
ktime_t powered_on_time;
@@ -109,19 +109,17 @@ static int atana33xc20_resume(struct device *dev)
if (hpd_asserted < 0)
ret = hpd_asserted;
- if (ret)
+ if (ret) {
dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret);
-
- return ret;
- }
-
- if (p->aux->wait_hpd_asserted) {
+ goto error;
+ }
+ } else if (p->aux->wait_hpd_asserted) {
ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US);
- if (ret)
+ if (ret) {
dev_warn(dev, "Controller error waiting for HPD: %d\n", ret);
-
- return ret;
+ goto error;
+ }
}
/*
@@ -133,6 +131,12 @@ static int atana33xc20_resume(struct device *dev)
* right times.
*/
return 0;
+
+error:
+ drm_dp_dpcd_set_powered(p->aux, false);
+ regulator_disable(p->supply);
+
+ return ret;
}
static int atana33xc20_disable(struct drm_panel *panel)
@@ -249,9 +253,12 @@ static int atana33xc20_get_modes(struct drm_panel *panel,
pm_runtime_get_sync(panel->dev);
- if (!p->edid)
- p->edid = drm_get_edid(connector, &aux_ep->aux->ddc);
- num = drm_add_edid_modes(connector, p->edid);
+ if (!p->drm_edid)
+ p->drm_edid = drm_edid_read_ddc(connector, &aux_ep->aux->ddc);
+
+ drm_edid_connector_update(connector, p->drm_edid);
+
+ num = drm_edid_connector_add_modes(connector);
pm_runtime_mark_last_busy(panel->dev);
pm_runtime_put_autosuspend(panel->dev);
@@ -324,9 +331,14 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
ret = drm_panel_dp_aux_backlight(&panel->base, aux_ep->aux);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+
+ /*
+ * Warn if we get an error, but don't consider it fatal. Having
+ * a panel where we can't control the backlight is better than
+ * no panel.
+ */
if (ret)
- return dev_err_probe(dev, ret,
- "failed to register dp aux backlight\n");
+ dev_warn(dev, "failed to register dp aux backlight: %d\n", ret);
drm_panel_add(&panel->base);
@@ -342,7 +354,7 @@ static void atana33xc20_remove(struct dp_aux_ep_device *aux_ep)
drm_panel_disable(&panel->base);
drm_panel_unprepare(&panel->base);
- kfree(panel->edid);
+ drm_edid_free(panel->drm_edid);
}
static void atana33xc20_shutdown(struct dp_aux_ep_device *aux_ep)
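
The atna33xc20 conversion above follows the struct drm_edid API: drm_edid_read_ddc() returns an opaque, cacheable EDID, drm_edid_connector_update() attaches it to the connector (a NULL argument is accepted and clears stale state), and drm_edid_connector_add_modes() adds the modes; drm_edid_free() releases the cache on remove. The same three-call pattern recurs in panel-simple below. A condensed sketch, with the helper name hypothetical:

#include <drm/drm_edid.h>

/* Sketch of the drm_edid read/update/add-modes pattern used above. */
static int example_get_modes(struct drm_connector *connector,
			     struct i2c_adapter *ddc,
			     const struct drm_edid **cached)
{
	if (!*cached)
		*cached = drm_edid_read_ddc(connector, ddc);

	drm_edid_connector_update(connector, *cached);

	return drm_edid_connector_add_modes(connector);
}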
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
new file mode 100644
index 000000000000..10bc8fb5f1f9
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Samsung S6E3FA7 panel.
+ *
+ * Copyright (c) 2022-2024, The Linux Foundation. All rights reserved.
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct s6e3fa7_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct s6e3fa7_panel *to_s6e3fa7_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e3fa7_panel, panel);
+}
+
+static void s6e3fa7_panel_reset(struct s6e3fa7_panel *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int s6e3fa7_panel_on(struct s6e3fa7_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear on: %d\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq(dsi, 0xf4,
+ 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0,
+ 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e3fa7_panel_prepare(struct drm_panel *panel)
+{
+ struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ s6e3fa7_panel_reset(ctx);
+
+ ret = s6e3fa7_panel_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e3fa7_panel_unprepare(struct drm_panel *panel)
+{
+ struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ return 0;
+}
+
+static int s6e3fa7_panel_disable(struct drm_panel *panel)
+{
+ struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static const struct drm_display_mode s6e3fa7_panel_mode = {
+ .clock = (1080 + 32 + 32 + 78) * (2220 + 32 + 4 + 78) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 32,
+ .hsync_end = 1080 + 32 + 32,
+ .htotal = 1080 + 32 + 32 + 78,
+ .vdisplay = 2220,
+ .vsync_start = 2220 + 32,
+ .vsync_end = 2220 + 32 + 4,
+ .vtotal = 2220 + 32 + 4 + 78,
+ .width_mm = 62,
+ .height_mm = 127,
+};
+
+static int s6e3fa7_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &s6e3fa7_panel_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e3fa7_panel_funcs = {
+ .prepare = s6e3fa7_panel_prepare,
+ .unprepare = s6e3fa7_panel_unprepare,
+ .disable = s6e3fa7_panel_disable,
+ .get_modes = s6e3fa7_panel_get_modes,
+};
+
+static int s6e3fa7_panel_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int s6e3fa7_panel_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ return brightness;
+}
+
+static const struct backlight_ops s6e3fa7_panel_bl_ops = {
+ .update_status = s6e3fa7_panel_bl_update_status,
+ .get_brightness = s6e3fa7_panel_bl_get_brightness,
+};
+
+static struct backlight_device *
+s6e3fa7_panel_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 1023,
+ .max_brightness = 1023,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &s6e3fa7_panel_bl_ops, &props);
+}
+
+static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e3fa7_panel *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &s6e3fa7_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = s6e3fa7_panel_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void s6e3fa7_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e3fa7_panel *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id s6e3fa7_panel_of_match[] = {
+ { .compatible = "samsung,s6e3fa7-ams559nk06" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s6e3fa7_panel_of_match);
+
+static struct mipi_dsi_driver s6e3fa7_panel_driver = {
+ .probe = s6e3fa7_panel_probe,
+ .remove = s6e3fa7_panel_remove,
+ .driver = {
+ .name = "panel-samsung-s6e3fa7",
+ .of_match_table = s6e3fa7_panel_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e3fa7_panel_driver);
+
+MODULE_AUTHOR("Richard Acayan <mailingradian@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Samsung S6E3FA7 command mode DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 20e3df1c59d4..dcb6d0b6ced0 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -151,7 +151,7 @@ struct panel_simple {
struct gpio_desc *enable_gpio;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
struct drm_display_mode override_mode;
@@ -309,8 +309,8 @@ static int panel_simple_suspend(struct device *dev)
regulator_disable(p->supply);
p->unprepared_time = ktime_get_boottime();
- kfree(p->edid);
- p->edid = NULL;
+ drm_edid_free(p->drm_edid);
+ p->drm_edid = NULL;
return 0;
}
@@ -399,11 +399,12 @@ static int panel_simple_get_modes(struct drm_panel *panel,
if (p->ddc) {
pm_runtime_get_sync(panel->dev);
- if (!p->edid)
- p->edid = drm_get_edid(connector, p->ddc);
+ if (!p->drm_edid)
+ p->drm_edid = drm_edid_read_ddc(connector, p->ddc);
- if (p->edid)
- num += drm_add_edid_modes(connector, p->edid);
+ drm_edid_connector_update(connector, p->drm_edid);
+
+ num += drm_edid_connector_add_modes(connector);
pm_runtime_mark_last_busy(panel->dev);
pm_runtime_put_autosuspend(panel->dev);
@@ -1457,6 +1458,32 @@ static const struct panel_desc boe_hv070wsa = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing cct_cmt430b19n00_timing = {
+ .pixelclock = { 8000000, 9000000, 12000000 },
+ .hactive = { 480, 480, 480 },
+ .hfront_porch = { 2, 8, 75 },
+ .hback_porch = { 3, 43, 43 },
+ .hsync_len = { 2, 4, 75 },
+ .vactive = { 272, 272, 272 },
+ .vfront_porch = { 2, 8, 37 },
+ .vback_porch = { 2, 12, 12 },
+ .vsync_len = { 2, 4, 37 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc cct_cmt430b19n00 = {
+ .timings = &cct_cmt430b19n00_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 53,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2565,22 +2592,22 @@ static const struct panel_desc innolux_g121i1_l01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode innolux_g121x1_l03_mode = {
- .clock = 65000,
- .hdisplay = 1024,
- .hsync_start = 1024 + 0,
- .hsync_end = 1024 + 1,
- .htotal = 1024 + 0 + 1 + 320,
- .vdisplay = 768,
- .vsync_start = 768 + 38,
- .vsync_end = 768 + 38 + 1,
- .vtotal = 768 + 38 + 1 + 0,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+static const struct display_timing innolux_g121x1_l03_timings = {
+ .pixelclock = { 57500000, 64900000, 74400000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 90, 140, 190 },
+ .hback_porch = { 90, 140, 190 },
+ .hsync_len = { 36, 40, 60 },
+ .vactive = { 768, 768, 768 },
+ .vfront_porch = { 2, 15, 30 },
+ .vback_porch = { 2, 15, 30 },
+ .vsync_len = { 2, 8, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
static const struct panel_desc innolux_g121x1_l03 = {
- .modes = &innolux_g121x1_l03_mode,
- .num_modes = 1,
+ .timings = &innolux_g121x1_l03_timings,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 246,
@@ -2591,6 +2618,27 @@ static const struct panel_desc innolux_g121x1_l03 = {
.unprepare = 200,
.disable = 400,
},
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct panel_desc innolux_g121xce_l01 = {
+ .timings = &innolux_g121x1_l03_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 246,
+ .height = 185,
+ },
+ .delay = {
+ .enable = 200,
+ .unprepare = 200,
+ .disable = 400,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
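
The g121x1-l03 conversion just above replaces a fixed drm_display_mode with a struct display_timing, whose fields are {min, typ, max} triplets; the typical values reproduce roughly the old 60 Hz mode while the ranges let the display controller pick timings it can actually generate (the new g121xce-l01 entry reuses them at 8 bpc). A standalone illustration of how the typical values recombine:

#include <stdio.h>

int main(void)
{
	unsigned long htotal = 1024 + 140 + 140 + 40;	/* typ: active+fp+bp+sync */
	unsigned long vtotal = 768 + 15 + 15 + 8;

	/* ~59.9 Hz at the 64.9 MHz typical pixel clock */
	printf("%.1f Hz\n", 64900000.0 / (double)(htotal * vtotal));
	return 0;
}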
static const struct display_timing innolux_g156hce_l01_timings = {
@@ -3465,6 +3513,32 @@ static const struct panel_desc pda_91_00156_a0 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = {
+ .clock = 66500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 12,
+ .hsync_end = 1280 + 12 + 20,
+ .htotal = 1280 + 12 + 20 + 56,
+ .vdisplay = 800,
+ .vsync_start = 800 + 1,
+ .vsync_end = 800 + 1 + 3,
+ .vtotal = 800 + 1 + 3 + 20,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc powertip_ph128800t006_zhc01 = {
+ .modes = &powertip_ph128800t006_zhc01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
.clock = 24750,
.hdisplay = 800,
@@ -4403,6 +4477,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,hv070wsa-100",
.data = &boe_hv070wsa
}, {
+ .compatible = "cct,cmt430b19n00",
+ .data = &cct_cmt430b19n00,
+ }, {
.compatible = "cdtech,s043wq26h-ct7",
.data = &cdtech_s043wq26h_ct7,
}, {
@@ -4538,6 +4615,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,g121x1-l03",
.data = &innolux_g121x1_l03,
}, {
+ .compatible = "innolux,g121xce-l01",
+ .data = &innolux_g121xce_l01,
+ }, {
.compatible = "innolux,g156hce-l01",
.data = &innolux_g156hce_l01,
}, {
@@ -4640,6 +4720,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
+ .compatible = "powertip,ph128800t006-zhc01",
+ .data = &powertip_ph128800t006_zhc01,
+ }, {
.compatible = "powertip,ph800480t013-idf02",
.data = &powertip_ph800480t013_idf02,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index a3e142f156d5..7d8302cca091 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -612,6 +612,92 @@ static const struct st7703_panel_desc rgb10max3_panel_desc = {
.init_sequence = rgb10max3_panel_init_sequence,
};
+static int gameforcechi_init_sequence(struct st7703 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /*
+ * The init sequence was supplied by the panel vendor. The panel will
+ * not respond to commands until it has been brought out of sleep mode.
+ */
+
+ mipi_dsi_dcs_exit_sleep_mode(dsi);
+ msleep(250);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI, 0x31, 0x81, 0x05, 0xf9,
+ 0x0e, 0x0e, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x44, 0x25, 0x00, 0x91, 0x0a, 0x00,
+ 0x00, 0x02, 0x4f, 0xd1, 0x00, 0x00, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT, 0x25);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF, 0x0c, 0x10, 0x0a,
+ 0x50, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR, 0x73, 0x73, 0x50, 0x50,
+ 0x00, 0x00, 0x08, 0x70, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x46);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0b);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP, 0x00, 0x13, 0xf0);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ, 0x07, 0x07, 0x0b, 0x0b,
+ 0x03, 0x0b, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00,
+ 0xc0, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER, 0x53, 0x00, 0x1e,
+ 0x1e, 0x77, 0xe1, 0xcc, 0xdd, 0x67, 0x77, 0x33,
+ 0x33);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP, 0x10, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM, 0x6c, 0x7c);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1, 0x08, 0x00, 0x0e, 0x00,
+ 0x00, 0xb0, 0xb1, 0x11, 0x31, 0x23, 0x28, 0x10,
+ 0xb0, 0xb1, 0x27, 0x08, 0x00, 0x04, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x02, 0x00, 0x00, 0x00,
+ 0x88, 0x88, 0xba, 0x60, 0x24, 0x08, 0x88, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0xba, 0x71, 0x35,
+ 0x18, 0x88, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2, 0x97, 0x0a, 0x82, 0x02,
+ 0x13, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x88, 0xba, 0x17, 0x53, 0x88, 0x88, 0x88,
+ 0x88, 0x88, 0x88, 0x81, 0x88, 0xba, 0x06, 0x42,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x23, 0x10,
+ 0x00, 0x02, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA, 0x00, 0x07, 0x0b,
+ 0x27, 0x2d, 0x3f, 0x3b, 0x37, 0x05, 0x0a, 0x0b,
+ 0x0f, 0x11, 0x0f, 0x12, 0x12, 0x18, 0x00, 0x07,
+ 0x0b, 0x27, 0x2d, 0x3f, 0x3b, 0x37, 0x05, 0xa0,
+ 0x0b, 0x0f, 0x11, 0x0f, 0x12, 0x12, 0x18);
+
+ return 0;
+}
+
+static const struct drm_display_mode gameforcechi_mode = {
+ .hdisplay = 640,
+ .hsync_start = 640 + 40,
+ .hsync_end = 640 + 40 + 2,
+ .htotal = 640 + 40 + 2 + 80,
+ .vdisplay = 480,
+ .vsync_start = 480 + 17,
+ .vsync_end = 480 + 17 + 5,
+ .vtotal = 480 + 17 + 5 + 13,
+ .clock = 23546,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 71,
+ .height_mm = 53,
+};
+
+static const struct st7703_panel_desc gameforcechi_desc = {
+ .mode = &gameforcechi_mode,
+ .lanes = 2,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = gameforcechi_init_sequence,
+};
+
static int st7703_enable(struct drm_panel *panel)
{
struct st7703 *ctx = panel_to_st7703(panel);
@@ -887,6 +973,7 @@ static void st7703_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id st7703_of_match[] = {
{ .compatible = "anbernic,rg353v-panel-v2", .data = &rg353v2_desc },
+ { .compatible = "gameforce,chi-panel", .data = &gameforcechi_desc },
{ .compatible = "powkiddy,rgb10max3-panel", .data = &rgb10max3_panel_desc },
{ .compatible = "powkiddy,rgb30-panel", .data = &rgb30panel_desc },
{ .compatible = "rocktech,jh057n00900", .data = &jh057n00900_panel_desc },
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index b73448cf349d..d447db912a61 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -550,10 +550,8 @@ static int truly_nt35597_probe(struct mipi_dsi_device *dsi)
dsi1_host = of_find_mipi_dsi_host_by_node(dsi1);
of_node_put(dsi1);
- if (!dsi1_host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!dsi1_host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
/* register the second DSI device */
dsi1_device = mipi_dsi_device_register_full(dsi1_host, &info);
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 775144695283..272490b9565b 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -197,7 +197,9 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
ctx->dsi = dsi;
ctx->supplies[0].supply = "vdda";
+ ctx->supplies[0].init_load_uA = 32000;
ctx->supplies[1].supply = "vdd3p3";
+ ctx->supplies[1].init_load_uA = 13200;
ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
ctx->supplies);
@@ -227,22 +229,8 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
goto err_dsi_attach;
}
- ret = regulator_set_load(ctx->supplies[0].consumer, 32000);
- if (ret) {
- dev_err(dev, "regulator set load failed for vdda supply ret = %d\n", ret);
- goto err_set_load;
- }
-
- ret = regulator_set_load(ctx->supplies[1].consumer, 13200);
- if (ret) {
- dev_err(dev, "regulator set load failed for vdd3p3 supply ret = %d\n", ret);
- goto err_set_load;
- }
-
return 0;
-err_set_load:
- mipi_dsi_detach(dsi);
err_dsi_attach:
drm_panel_remove(&ctx->panel);
return ret;
@@ -253,8 +241,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index 2c01c1e7523e..7da2b3f02ed9 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -12,6 +12,4 @@ panfrost-y := \
panfrost_perfcnt.o \
panfrost_dump.o
-panfrost-$(CONFIG_DEBUG_FS) += panfrost_debugfs.o
-
obj-$(CONFIG_DRM_PANFROST) += panfrost.o
diff --git a/drivers/gpu/drm/panfrost/panfrost_debugfs.c b/drivers/gpu/drm/panfrost/panfrost_debugfs.c
deleted file mode 100644
index 72d4286a6bf7..000000000000
--- a/drivers/gpu/drm/panfrost/panfrost_debugfs.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2023 Collabora ltd. */
-/* Copyright 2023 Amazon.com, Inc. or its affiliates. */
-
-#include <linux/debugfs.h>
-#include <linux/platform_device.h>
-#include <drm/drm_debugfs.h>
-#include <drm/drm_file.h>
-#include <drm/panfrost_drm.h>
-
-#include "panfrost_device.h"
-#include "panfrost_gpu.h"
-#include "panfrost_debugfs.h"
-
-void panfrost_debugfs_init(struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev->dev));
-
- debugfs_create_atomic_t("profile", 0600, minor->debugfs_root, &pfdev->profile_mode);
-}
diff --git a/drivers/gpu/drm/panfrost/panfrost_debugfs.h b/drivers/gpu/drm/panfrost/panfrost_debugfs.h
deleted file mode 100644
index c5af5f35877f..000000000000
--- a/drivers/gpu/drm/panfrost/panfrost_debugfs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2023 Collabora ltd.
- * Copyright 2023 Amazon.com, Inc. or its affiliates.
- */
-
-#ifndef PANFROST_DEBUGFS_H
-#define PANFROST_DEBUGFS_H
-
-#ifdef CONFIG_DEBUG_FS
-void panfrost_debugfs_init(struct drm_minor *minor);
-#endif
-
-#endif /* PANFROST_DEBUGFS_H */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 62f7e3527385..cffcb0ac7c11 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -130,7 +130,7 @@ struct panfrost_device {
struct list_head scheduled_jobs;
struct panfrost_perfcnt *perfcnt;
- atomic_t profile_mode;
+ bool profile_mode;
struct mutex sched_lock;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index a926d71e8131..ef9f6c0716d5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -20,7 +20,6 @@
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
-#include "panfrost_debugfs.h"
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
@@ -551,10 +550,12 @@ static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS);
for (i = 0; i < NUM_JOB_SLOTS - 1; i++) {
- drm_printf(p, "drm-engine-%s:\t%llu ns\n",
- engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]);
- drm_printf(p, "drm-cycles-%s:\t%llu\n",
- engine_names[i], panfrost_priv->engine_usage.cycles[i]);
+ if (pfdev->profile_mode) {
+ drm_printf(p, "drm-engine-%s:\t%llu ns\n",
+ engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]);
+ drm_printf(p, "drm-cycles-%s:\t%llu\n",
+ engine_names[i], panfrost_priv->engine_usage.cycles[i]);
+ }
drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n",
engine_names[i], pfdev->pfdevfreq.fast_rate);
drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n",
@@ -600,10 +601,6 @@ static const struct drm_driver panfrost_drm_driver = {
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
-
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = panfrost_debugfs_init,
-#endif
};
static int panfrost_probe(struct platform_device *pdev)
@@ -692,6 +689,40 @@ static void panfrost_remove(struct platform_device *pdev)
drm_dev_put(ddev);
}
+static ssize_t profiling_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", pfdev->profile_mode);
+}
+
+static ssize_t profiling_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ bool value;
+ int err;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return err;
+
+ pfdev->profile_mode = value;
+
+ return len;
+}
+
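+/*
+ * Usage sketch (the exact path is an assumption, it depends on how the
+ * platform device is named): with the attribute group registered below,
+ * profiling accepts any kstrtobool-compatible value, e.g.
+ *
+ *   echo 1 > /sys/bus/platform/devices/<gpu>/profiling
+ */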
+static DEVICE_ATTR_RW(profiling);
+
+static struct attribute *panfrost_attrs[] = {
+ &dev_attr_profiling.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(panfrost);
+
/*
* The OPP core wants the supply names to be NULL terminated, but we need the
* correct num_supplies value for regulator core. Hence, we NULL terminate here
@@ -789,6 +820,7 @@ static struct platform_driver panfrost_driver = {
.name = "panfrost",
.pm = pm_ptr(&panfrost_pm_ops),
.of_match_table = dt_match,
+ .dev_groups = panfrost_groups,
},
};
module_platform_driver(panfrost_driver);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 9063ce254642..fd8e44992184 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -441,19 +441,19 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
- val, !val, 1, 1000);
+ val, !val, 1, 2000);
if (ret)
dev_err(pfdev->dev, "shader power transition timeout");
gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
- val, !val, 1, 1000);
+ val, !val, 1, 2000);
if (ret)
dev_err(pfdev->dev, "tiler power transition timeout");
gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
- val, !val, 0, 1000);
+ val, !val, 0, 2000);
if (ret)
dev_err(pfdev->dev, "l2 power transition timeout");
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 0c2dbf6ef2a5..a61ef0af9a4e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -243,7 +243,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
subslot = panfrost_enqueue_job(pfdev, js, job);
/* Don't queue the job if a reset is in progress */
if (!atomic_read(&pfdev->reset.pending)) {
- if (atomic_read(&pfdev->profile_mode)) {
+ if (pfdev->profile_mode) {
panfrost_cycle_counter_get(pfdev);
job->is_profiled = true;
job->start_time = ktime_get();
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index f38385fe76bb..b91019cd5acb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -502,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
mapping_set_unevictable(mapping);
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+ /* This can happen if the last fault only partially filled this
+ * section of the pages array before failing. In that case
+ * we skip already filled pages.
+ */
+ if (pages[i])
+ continue;
+
pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) {
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
- goto err_pages;
+ goto err_unlock;
}
}
@@ -514,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
- goto err_pages;
+ goto err_unlock;
ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
@@ -537,8 +544,6 @@ out:
err_map:
sg_free_table(sgt);
-err_pages:
- drm_gem_shmem_put_pages(&bo->base);
err_unlock:
dma_resv_unlock(obj->resv);
err_bo:
diff --git a/drivers/gpu/drm/panthor/Kconfig b/drivers/gpu/drm/panthor/Kconfig
new file mode 100644
index 000000000000..55b40ad07f3b
--- /dev/null
+++ b/drivers/gpu/drm/panthor/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0 or MIT
+
+config DRM_PANTHOR
+ tristate "Panthor (DRM support for ARM Mali CSF-based GPUs)"
+ depends on DRM
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ depends on MMU
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select DRM_EXEC
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_GPUVM
+ select DRM_SCHED
+ select IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_SUPPORT
+ select PM_DEVFREQ
+ help
+ DRM driver for ARM Mali CSF-based GPUs.
+
+ This driver is for Mali (or Immortalis) Valhall Gxxx GPUs.
+
+ Note that the Mali-G68 and Mali-G78, while based on the Valhall
+ architecture, will be supported by the panfrost driver as they are
+ not CSF GPUs.
diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile
new file mode 100644
index 000000000000..15294719b09c
--- /dev/null
+++ b/drivers/gpu/drm/panthor/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0 or MIT
+
+panthor-y := \
+ panthor_devfreq.o \
+ panthor_device.o \
+ panthor_drv.o \
+ panthor_fw.o \
+ panthor_gem.o \
+ panthor_gpu.o \
+ panthor_heap.o \
+ panthor_mmu.o \
+ panthor_sched.o
+
+obj-$(CONFIG_DRM_PANTHOR) += panthor.o
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
new file mode 100644
index 000000000000..c6d3c327cc24
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2019 Collabora ltd. */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+
+#include <drm/drm_managed.h>
+
+#include "panthor_devfreq.h"
+#include "panthor_device.h"
+
+/**
+ * struct panthor_devfreq - Device frequency management
+ */
+struct panthor_devfreq {
+ /** @devfreq: devfreq device. */
+ struct devfreq *devfreq;
+
+ /** @gov_data: Governor data. */
+ struct devfreq_simple_ondemand_data gov_data;
+
+ /** @busy_time: Busy time. */
+ ktime_t busy_time;
+
+ /** @idle_time: Idle time. */
+ ktime_t idle_time;
+
+ /** @time_last_update: Last update time. */
+ ktime_t time_last_update;
+
+ /** @last_busy_state: True if the GPU was busy last time we updated the state. */
+ bool last_busy_state;
+
+ /**
+ * @lock: Lock used to protect busy_time, idle_time, time_last_update and
+ * last_busy_state.
+ *
+ * These fields can be accessed concurrently by panthor_devfreq_get_dev_status()
+ * and panthor_devfreq_record_{busy,idle}().
+ */
+ spinlock_t lock;
+};
+
+static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq)
+{
+ ktime_t now, last;
+
+ now = ktime_get();
+ last = pdevfreq->time_last_update;
+
+ if (pdevfreq->last_busy_state)
+ pdevfreq->busy_time += ktime_sub(now, last);
+ else
+ pdevfreq->idle_time += ktime_sub(now, last);
+
+ pdevfreq->time_last_update = now;
+}
+
+static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct dev_pm_opp *opp;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ dev_pm_opp_put(opp);
+
+ return dev_pm_opp_set_rate(dev, *freq);
+}
+
+static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq)
+{
+ pdevfreq->busy_time = 0;
+ pdevfreq->idle_time = 0;
+ pdevfreq->time_last_update = ktime_get();
+}
+
+static int panthor_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+ unsigned long irqflags;
+
+ status->current_frequency = clk_get_rate(ptdev->clks.core);
+
+ spin_lock_irqsave(&pdevfreq->lock, irqflags);
+
+ panthor_devfreq_update_utilization(pdevfreq);
+
+ status->total_time = ktime_to_ns(ktime_add(pdevfreq->busy_time,
+ pdevfreq->idle_time));
+
+ status->busy_time = ktime_to_ns(pdevfreq->busy_time);
+
+ panthor_devfreq_reset(pdevfreq);
+
+ spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
+
+ drm_dbg(&ptdev->base, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ status->busy_time, status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile panthor_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .polling_ms = 50, /* ~3 frames */
+ .target = panthor_devfreq_target,
+ .get_dev_status = panthor_devfreq_get_dev_status,
+};
+
+int panthor_devfreq_init(struct panthor_device *ptdev)
+{
+ /* There are actually two regulators (mali and sram), but the OPP core only
+ * supports one.
+ *
+ * We assume the sram regulator is coupled with the mali one and let
+ * the coupling logic deal with voltage updates.
+ */
+ static const char * const reg_names[] = { "mali", NULL };
+ struct thermal_cooling_device *cooling;
+ struct device *dev = ptdev->base.dev;
+ struct panthor_devfreq *pdevfreq;
+ struct dev_pm_opp *opp;
+ unsigned long cur_freq;
+ int ret;
+
+ pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL);
+ if (!pdevfreq)
+ return -ENOMEM;
+
+ ptdev->devfreq = pdevfreq;
+
+ ret = devm_pm_opp_set_regulators(dev, reg_names);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
+
+ return ret;
+ }
+
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&pdevfreq->lock);
+
+ panthor_devfreq_reset(pdevfreq);
+
+ cur_freq = clk_get_rate(ptdev->clks.core);
+
+ opp = devfreq_recommended_opp(dev, &cur_freq, 0);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ panthor_devfreq_profile.initial_freq = cur_freq;
+
+ /* Regulator coupling only takes care of synchronizing/balancing voltage
+ * updates, but the coupled regulator needs to be enabled manually.
+ *
+ * We use devm_regulator_get_enable_optional() and keep the sram supply
+ * enabled until the device is removed, just like we do for the mali
+ * supply, which is enabled when dev_pm_opp_set_opp(dev, opp) is called,
+ * and disabled when the opp_table is torn down, using the devm action.
+ *
+ * If we really care about disabling regulators on suspend, we should:
+ * - use devm_regulator_get_optional() here
+ * - call dev_pm_opp_set_opp(dev, NULL) before leaving this function
+ * (this disables the regulator passed to the OPP layer)
+ * - call dev_pm_opp_set_opp(dev, NULL) and
+ * regulator_disable(ptdev->regulators.sram) in
+ * panthor_devfreq_suspend()
+ * - call dev_pm_opp_set_opp(dev, default_opp) and
+ * regulator_enable(ptdev->regulators.sram) in
+ * panthor_devfreq_resume()
+ *
+ * But without knowing if it's beneficial or not (in terms of power
+ * consumption), or how much it slows down the suspend/resume steps,
+ * let's just keep regulators enabled for the device lifetime.
+ */
+ ret = devm_regulator_get_enable_optional(dev, "sram");
+ if (ret && ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "Couldn't retrieve/enable sram supply\n");
+ return ret;
+ }
+
+ /*
+ * Set the recommended OPP. This will enable and configure the regulator
+ * (if any) and avoid a switch-off by regulator_late_cleanup().
+ */
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+ return ret;
+ }
+
+ dev_pm_opp_put(opp);
+
+ /*
+ * Setup default thresholds for the simple_ondemand governor.
+ * The values are chosen based on experiments.
+ */
+ pdevfreq->gov_data.upthreshold = 45;
+ pdevfreq->gov_data.downdifferential = 5;
+
+ pdevfreq->devfreq = devm_devfreq_add_device(dev, &panthor_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &pdevfreq->gov_data);
+ if (IS_ERR(pdevfreq->devfreq)) {
+ DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
+ ret = PTR_ERR(pdevfreq->devfreq);
+ pdevfreq->devfreq = NULL;
+ return ret;
+ }
+
+ cooling = devfreq_cooling_em_register(pdevfreq->devfreq, NULL);
+ if (IS_ERR(cooling))
+ DRM_DEV_INFO(dev, "Failed to register cooling device\n");
+
+ return 0;
+}
+
+int panthor_devfreq_resume(struct panthor_device *ptdev)
+{
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+
+ if (!pdevfreq->devfreq)
+ return 0;
+
+ panthor_devfreq_reset(pdevfreq);
+
+ return devfreq_resume_device(pdevfreq->devfreq);
+}
+
+int panthor_devfreq_suspend(struct panthor_device *ptdev)
+{
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+
+ if (!pdevfreq->devfreq)
+ return 0;
+
+ return devfreq_suspend_device(pdevfreq->devfreq);
+}
+
+void panthor_devfreq_record_busy(struct panthor_device *ptdev)
+{
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+ unsigned long irqflags;
+
+ if (!pdevfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&pdevfreq->lock, irqflags);
+
+ panthor_devfreq_update_utilization(pdevfreq);
+ pdevfreq->last_busy_state = true;
+
+ spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
+}
+
+void panthor_devfreq_record_idle(struct panthor_device *ptdev)
+{
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+ unsigned long irqflags;
+
+ if (!pdevfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&pdevfreq->lock, irqflags);
+
+ panthor_devfreq_update_utilization(pdevfreq);
+ pdevfreq->last_busy_state = false;
+
+ spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
+}
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.h b/drivers/gpu/drm/panthor/panthor_devfreq.h
new file mode 100644
index 000000000000..83a5c9522493
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANTHOR_DEVFREQ_H__
+#define __PANTHOR_DEVFREQ_H__
+
+struct devfreq;
+struct thermal_cooling_device;
+
+struct panthor_device;
+struct panthor_devfreq;
+
+int panthor_devfreq_init(struct panthor_device *ptdev);
+
+int panthor_devfreq_resume(struct panthor_device *ptdev);
+int panthor_devfreq_suspend(struct panthor_device *ptdev);
+
+void panthor_devfreq_record_busy(struct panthor_device *ptdev);
+void panthor_devfreq_record_idle(struct panthor_device *ptdev);
+
+#endif /* __PANTHOR_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
new file mode 100644
index 000000000000..75276cbeba20
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#include <linux/clk.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+
+#include "panthor_devfreq.h"
+#include "panthor_device.h"
+#include "panthor_fw.h"
+#include "panthor_gpu.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+static int panthor_clk_init(struct panthor_device *ptdev)
+{
+ ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
+ if (IS_ERR(ptdev->clks.core))
+ return dev_err_probe(ptdev->base.dev,
+ PTR_ERR(ptdev->clks.core),
+ "get 'core' clock failed");
+
+ ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
+ if (IS_ERR(ptdev->clks.stacks))
+ return dev_err_probe(ptdev->base.dev,
+ PTR_ERR(ptdev->clks.stacks),
+ "get 'stacks' clock failed");
+
+ ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
+ if (IS_ERR(ptdev->clks.coregroup))
+ return dev_err_probe(ptdev->base.dev,
+ PTR_ERR(ptdev->clks.coregroup),
+ "get 'coregroup' clock failed");
+
+ drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
+ return 0;
+}
+
+void panthor_device_unplug(struct panthor_device *ptdev)
+{
+ /* This function can be called from two different paths: the reset work
+ * and the platform device remove callback. drm_dev_unplug() doesn't
+ * deal with concurrent callers, so we have to protect drm_dev_unplug()
+ * calls with our own lock, and bail out if the device is already
+ * unplugged.
+ */
+ mutex_lock(&ptdev->unplug.lock);
+ if (drm_dev_is_unplugged(&ptdev->base)) {
+ /* Someone beat us to it; release the lock and wait for the unplug
+ * operation to be reported as done.
+ */
+ mutex_unlock(&ptdev->unplug.lock);
+ wait_for_completion(&ptdev->unplug.done);
+ return;
+ }
+
+ /* Call drm_dev_unplug() so any access to HW blocks happening after
+ * that point get rejected.
+ */
+ drm_dev_unplug(&ptdev->base);
+
+ /* We do the rest of the unplug with the unplug lock released,
+ * future callers will wait on ptdev->unplug.done anyway.
+ */
+ mutex_unlock(&ptdev->unplug.lock);
+
+ drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);
+
+ /* Now, try to cleanly shutdown the GPU before the device resources
+ * get reclaimed.
+ */
+ panthor_sched_unplug(ptdev);
+ panthor_fw_unplug(ptdev);
+ panthor_mmu_unplug(ptdev);
+ panthor_gpu_unplug(ptdev);
+
+ pm_runtime_dont_use_autosuspend(ptdev->base.dev);
+ pm_runtime_put_sync_suspend(ptdev->base.dev);
+
+ /* If PM is disabled, we need to call the suspend handler manually. */
+ if (!IS_ENABLED(CONFIG_PM))
+ panthor_device_suspend(ptdev->base.dev);
+
+ /* Report the unplug operation as done to unblock concurrent
+ * panthor_device_unplug() callers.
+ */
+ complete_all(&ptdev->unplug.done);
+}
+
+static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+
+ cancel_work_sync(&ptdev->reset.work);
+ destroy_workqueue(ptdev->reset.wq);
+}
+
+static void panthor_device_reset_work(struct work_struct *work)
+{
+ struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
+ int ret = 0, cookie;
+
+ if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) {
+ /*
+ * No need for a reset as the device has been (or will be)
+ * powered down
+ */
+ atomic_set(&ptdev->reset.pending, 0);
+ return;
+ }
+
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return;
+
+ panthor_sched_pre_reset(ptdev);
+ panthor_fw_pre_reset(ptdev, true);
+ panthor_mmu_pre_reset(ptdev);
+ panthor_gpu_soft_reset(ptdev);
+ panthor_gpu_l2_power_on(ptdev);
+ panthor_mmu_post_reset(ptdev);
+ ret = panthor_fw_post_reset(ptdev);
+ if (ret)
+ goto out_dev_exit;
+
+ atomic_set(&ptdev->reset.pending, 0);
+ panthor_sched_post_reset(ptdev);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+
+ if (ret) {
+ panthor_device_unplug(ptdev);
+ drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
+ }
+}
+
+static bool panthor_device_is_initialized(struct panthor_device *ptdev)
+{
+ return !!ptdev->scheduler;
+}
+
+static void panthor_device_free_page(struct drm_device *ddev, void *data)
+{
+ __free_page(data);
+}
+
+int panthor_device_init(struct panthor_device *ptdev)
+{
+ u32 *dummy_page_virt;
+ struct resource *res;
+ struct page *p;
+ int ret;
+
+ ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+
+ init_completion(&ptdev->unplug.done);
+ ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
+ if (ret)
+ return ret;
+
+ ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
+ if (ret)
+ return ret;
+
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ ptdev->pm.dummy_latest_flush = p;
+ dummy_page_virt = page_address(p);
+ ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
+ ptdev->pm.dummy_latest_flush);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the dummy page holding the latest flush to 1. This will cause the
+ * flush to be avoided, as we know it isn't necessary if the submission
+ * happens while the dummy page is mapped. Zero cannot be used because
+ * that means 'always flush'.
+ */
+ *dummy_page_virt = 1;
+
+ INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
+ ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
+ if (!ptdev->reset.wq)
+ return -ENOMEM;
+
+ ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
+ if (ret)
+ return ret;
+
+ ret = panthor_clk_init(ptdev);
+ if (ret)
+ return ret;
+
+ ret = panthor_devfreq_init(ptdev);
+ if (ret)
+ return ret;
+
+ ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
+ 0, &res);
+ if (IS_ERR(ptdev->iomem))
+ return PTR_ERR(ptdev->iomem);
+
+ ptdev->phys_addr = res->start;
+
+ ret = devm_pm_runtime_enable(ptdev->base.dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ if (ret)
+ return ret;
+
+ /* If PM is disabled, we need to call panthor_device_resume() manually. */
+ if (!IS_ENABLED(CONFIG_PM)) {
+ ret = panthor_device_resume(ptdev->base.dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = panthor_gpu_init(ptdev);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = panthor_mmu_init(ptdev);
+ if (ret)
+ goto err_unplug_gpu;
+
+ ret = panthor_fw_init(ptdev);
+ if (ret)
+ goto err_unplug_mmu;
+
+ ret = panthor_sched_init(ptdev);
+ if (ret)
+ goto err_unplug_fw;
+
+ /* ~3 frames */
+ pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
+ pm_runtime_use_autosuspend(ptdev->base.dev);
+
+ ret = drm_dev_register(&ptdev->base, 0);
+ if (ret)
+ goto err_disable_autosuspend;
+
+ pm_runtime_put_autosuspend(ptdev->base.dev);
+ return 0;
+
+err_disable_autosuspend:
+ pm_runtime_dont_use_autosuspend(ptdev->base.dev);
+ panthor_sched_unplug(ptdev);
+
+err_unplug_fw:
+ panthor_fw_unplug(ptdev);
+
+err_unplug_mmu:
+ panthor_mmu_unplug(ptdev);
+
+err_unplug_gpu:
+ panthor_gpu_unplug(ptdev);
+
+err_rpm_put:
+ pm_runtime_put_sync_suspend(ptdev->base.dev);
+ return ret;
+}
+
+#define PANTHOR_EXCEPTION(id) \
+ [DRM_PANTHOR_EXCEPTION_ ## id] = { \
+ .name = #id, \
+ }
+
+struct panthor_exception_info {
+ const char *name;
+};
+
+static const struct panthor_exception_info panthor_exception_infos[] = {
+ PANTHOR_EXCEPTION(OK),
+ PANTHOR_EXCEPTION(TERMINATED),
+ PANTHOR_EXCEPTION(KABOOM),
+ PANTHOR_EXCEPTION(EUREKA),
+ PANTHOR_EXCEPTION(ACTIVE),
+ PANTHOR_EXCEPTION(CS_RES_TERM),
+ PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
+ PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
+ PANTHOR_EXCEPTION(CS_BUS_FAULT),
+ PANTHOR_EXCEPTION(CS_INSTR_INVALID),
+ PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
+ PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
+ PANTHOR_EXCEPTION(INSTR_INVALID_PC),
+ PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
+ PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
+ PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
+ PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
+ PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
+ PANTHOR_EXCEPTION(IMPRECISE_FAULT),
+ PANTHOR_EXCEPTION(OOM),
+ PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
+ PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
+ PANTHOR_EXCEPTION(GPU_BUS_FAULT),
+ PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
+ PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
+ PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
+ PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
+ PANTHOR_EXCEPTION(PERM_FAULT_0),
+ PANTHOR_EXCEPTION(PERM_FAULT_1),
+ PANTHOR_EXCEPTION(PERM_FAULT_2),
+ PANTHOR_EXCEPTION(PERM_FAULT_3),
+ PANTHOR_EXCEPTION(ACCESS_FLAG_1),
+ PANTHOR_EXCEPTION(ACCESS_FLAG_2),
+ PANTHOR_EXCEPTION(ACCESS_FLAG_3),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
+ PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
+ PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
+ PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
+ PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
+ PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
+};
+
+const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
+{
+ if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
+ !panthor_exception_infos[exception_code].name)
+ return "Unknown exception type";
+
+ return panthor_exception_infos[exception_code].name;
+}
+
+static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct panthor_device *ptdev = vma->vm_private_data;
+ u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long pfn;
+ pgprot_t pgprot;
+ vm_fault_t ret;
+ bool active;
+ int cookie;
+
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return VM_FAULT_SIGBUS;
+
+ mutex_lock(&ptdev->pm.mmio_lock);
+ active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;
+
+ switch (offset) {
+ case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
+ if (active)
+ pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
+ else
+ pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
+ break;
+
+ default:
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ pgprot = vma->vm_page_prot;
+ if (active)
+ pgprot = pgprot_noncached(pgprot);
+
+ ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);
+
+out_unlock:
+ mutex_unlock(&ptdev->pm.mmio_lock);
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static const struct vm_operations_struct panthor_mmio_vm_ops = {
+ .fault = panthor_mmio_vm_fault,
+};
+
+int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
+{
+ u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+
+ switch (offset) {
+ case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
+ (vma->vm_flags & (VM_WRITE | VM_EXEC)))
+ return -EINVAL;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Defer actual mapping to the fault handler. */
+ vma->vm_private_data = ptdev;
+ vma->vm_ops = &panthor_mmio_vm_ops;
+ vm_flags_set(vma,
+ VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
+ VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
+ return 0;
+}
+
+int panthor_device_resume(struct device *dev)
+{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
+ int ret, cookie;
+
+ if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
+ return -EINVAL;
+
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);
+
+ ret = clk_prepare_enable(ptdev->clks.core);
+ if (ret)
+ goto err_set_suspended;
+
+ ret = clk_prepare_enable(ptdev->clks.stacks);
+ if (ret)
+ goto err_disable_core_clk;
+
+ ret = clk_prepare_enable(ptdev->clks.coregroup);
+ if (ret)
+ goto err_disable_stacks_clk;
+
+ ret = panthor_devfreq_resume(ptdev);
+ if (ret)
+ goto err_disable_coregroup_clk;
+
+ if (panthor_device_is_initialized(ptdev) &&
+ drm_dev_enter(&ptdev->base, &cookie)) {
+ panthor_gpu_resume(ptdev);
+ panthor_mmu_resume(ptdev);
+ ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
+ if (!ret) {
+ panthor_sched_resume(ptdev);
+ } else {
+ panthor_mmu_suspend(ptdev);
+ panthor_gpu_suspend(ptdev);
+ }
+
+ drm_dev_exit(cookie);
+
+ if (ret)
+ goto err_suspend_devfreq;
+ }
+
+ if (atomic_read(&ptdev->reset.pending))
+ queue_work(ptdev->reset.wq, &ptdev->reset.work);
+
+ /* Clear all IOMEM mappings pointing to this device after we've
+ * resumed. This way the fake mappings pointing to the dummy pages
+ * are removed and the real iomem mapping will be restored on next
+ * access.
+ */
+ mutex_lock(&ptdev->pm.mmio_lock);
+ unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
+ DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
+ mutex_unlock(&ptdev->pm.mmio_lock);
+ return 0;
+
+err_suspend_devfreq:
+ panthor_devfreq_suspend(ptdev);
+
+err_disable_coregroup_clk:
+ clk_disable_unprepare(ptdev->clks.coregroup);
+
+err_disable_stacks_clk:
+ clk_disable_unprepare(ptdev->clks.stacks);
+
+err_disable_core_clk:
+ clk_disable_unprepare(ptdev->clks.core);
+
+err_set_suspended:
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ return ret;
+}
+
+int panthor_device_suspend(struct device *dev)
+{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
+ int ret, cookie;
+
+ if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
+ return -EINVAL;
+
+ /* Clear all IOMEM mappings pointing to this device before we
+ * shutdown the power-domain and clocks. Failing to do that results
+ * in external aborts when the process accesses the iomem region.
+ * We change the state and call unmap_mapping_range() with the
+ * mmio_lock held to make sure the vm_fault handler won't set up
+ * invalid mappings.
+ */
+ mutex_lock(&ptdev->pm.mmio_lock);
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
+ unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
+ DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
+ mutex_unlock(&ptdev->pm.mmio_lock);
+
+ if (panthor_device_is_initialized(ptdev) &&
+ drm_dev_enter(&ptdev->base, &cookie)) {
+ cancel_work_sync(&ptdev->reset.work);
+
+ /* We prepare everything as if we were resetting the GPU.
+ * The end of the reset will happen in the resume path though.
+ */
+ panthor_sched_suspend(ptdev);
+ panthor_fw_suspend(ptdev);
+ panthor_mmu_suspend(ptdev);
+ panthor_gpu_suspend(ptdev);
+ drm_dev_exit(cookie);
+ }
+
+ ret = panthor_devfreq_suspend(ptdev);
+ if (ret) {
+ if (panthor_device_is_initialized(ptdev) &&
+ drm_dev_enter(&ptdev->base, &cookie)) {
+ panthor_gpu_resume(ptdev);
+ panthor_mmu_resume(ptdev);
+ drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
+ panthor_sched_resume(ptdev);
+ drm_dev_exit(cookie);
+ }
+
+ goto err_set_active;
+ }
+
+ clk_disable_unprepare(ptdev->clks.coregroup);
+ clk_disable_unprepare(ptdev->clks.stacks);
+ clk_disable_unprepare(ptdev->clks.core);
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ return 0;
+
+err_set_active:
+ /* If something failed and we have to revert back to an
+ * active state, we also need to clear the MMIO userspace
+ * mappings, so any dummy pages that were mapped while we
+ * were trying to suspend get invalidated.
+ */
+ mutex_lock(&ptdev->pm.mmio_lock);
+ atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
+ unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
+ DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
+ mutex_unlock(&ptdev->pm.mmio_lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
new file mode 100644
index 000000000000..2fdd671b38fd
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_DEVICE_H__
+#define __PANTHOR_DEVICE_H__
+
+#include <linux/atomic.h>
+#include <linux/io-pgtable.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panthor_drm.h>
+
+struct panthor_csf;
+struct panthor_csf_ctx;
+struct panthor_device;
+struct panthor_gpu;
+struct panthor_group_pool;
+struct panthor_heap_pool;
+struct panthor_job;
+struct panthor_mmu;
+struct panthor_fw;
+struct panthor_perfcnt;
+struct panthor_vm;
+struct panthor_vm_pool;
+
+/**
+ * enum panthor_device_pm_state - PM state
+ */
+enum panthor_device_pm_state {
+ /** @PANTHOR_DEVICE_PM_STATE_SUSPENDED: Device is suspended. */
+ PANTHOR_DEVICE_PM_STATE_SUSPENDED = 0,
+
+ /** @PANTHOR_DEVICE_PM_STATE_RESUMING: Device is being resumed. */
+ PANTHOR_DEVICE_PM_STATE_RESUMING,
+
+ /** @PANTHOR_DEVICE_PM_STATE_ACTIVE: Device is active. */
+ PANTHOR_DEVICE_PM_STATE_ACTIVE,
+
+ /** @PANTHOR_DEVICE_PM_STATE_SUSPENDING: Device is being suspended. */
+ PANTHOR_DEVICE_PM_STATE_SUSPENDING,
+};
+
+/**
+ * struct panthor_irq - IRQ data
+ *
+ * Used to automate IRQ handling for the 3 different IRQs we have in this driver.
+ */
+struct panthor_irq {
+ /** @ptdev: Panthor device */
+ struct panthor_device *ptdev;
+
+ /** @irq: IRQ number. */
+ int irq;
+
+ /** @mask: Current mask being applied to xxx_INT_MASK. */
+ u32 mask;
+
+ /** @suspended: Set to true when the IRQ is suspended. */
+ atomic_t suspended;
+};
+
+/**
+ * struct panthor_device - Panthor device
+ */
+struct panthor_device {
+ /** @base: Base drm_device. */
+ struct drm_device base;
+
+ /** @phys_addr: Physical address of the iomem region. */
+ phys_addr_t phys_addr;
+
+ /** @iomem: CPU mapping of the IOMEM region. */
+ void __iomem *iomem;
+
+ /** @clks: GPU clocks. */
+ struct {
+ /** @core: Core clock. */
+ struct clk *core;
+
+ /** @stacks: Stacks clock. This clock is optional. */
+ struct clk *stacks;
+
+ /** @coregroup: Core group clock. This clock is optional. */
+ struct clk *coregroup;
+ } clks;
+
+ /** @coherent: True if the CPU/GPU are memory coherent. */
+ bool coherent;
+
+ /** @gpu_info: GPU information. */
+ struct drm_panthor_gpu_info gpu_info;
+
+ /** @csif_info: Command stream interface information. */
+ struct drm_panthor_csif_info csif_info;
+
+ /** @gpu: GPU management data. */
+ struct panthor_gpu *gpu;
+
+ /** @fw: FW management data. */
+ struct panthor_fw *fw;
+
+ /** @mmu: MMU management data. */
+ struct panthor_mmu *mmu;
+
+ /** @scheduler: Scheduler management data. */
+ struct panthor_scheduler *scheduler;
+
+ /** @devfreq: Device frequency scaling management data. */
+ struct panthor_devfreq *devfreq;
+
+ /** @unplug: Device unplug related fields. */
+ struct {
+ /** @lock: Lock used to serialize unplug operations. */
+ struct mutex lock;
+
+ /**
+ * @done: Completion object signaled when the unplug
+ * operation is done.
+ */
+ struct completion done;
+ } unplug;
+
+ /** @reset: Reset related fields. */
+ struct {
+ /** @wq: Ordered workqueue used to schedule reset operations. */
+ struct workqueue_struct *wq;
+
+ /** @work: Reset work. */
+ struct work_struct work;
+
+ /** @pending: Set to true if a reset is pending. */
+ atomic_t pending;
+ } reset;
+
+ /** @pm: Power management related data. */
+ struct {
+ /** @state: Power state. */
+ atomic_t state;
+
+ /**
+ * @mmio_lock: Lock protecting MMIO userspace CPU mappings.
+ *
+ * This is needed to ensure we map the dummy IO pages when
+ * the device is being suspended, and the real IO pages when
+ * the device is being resumed. The atomicity of the PM
+ * state alone isn't enough to deal with this race.
+ */
+ struct mutex mmio_lock;
+
+ /**
+ * @dummy_latest_flush: Dummy LATEST_FLUSH page.
+ *
+ * Used to replace the real LATEST_FLUSH page when the GPU
+ * is suspended.
+ */
+ struct page *dummy_latest_flush;
+ } pm;
+};
+
+/**
+ * struct panthor_file - Panthor file
+ */
+struct panthor_file {
+ /** @ptdev: Device attached to this file. */
+ struct panthor_device *ptdev;
+
+ /** @vms: VM pool attached to this file. */
+ struct panthor_vm_pool *vms;
+
+ /** @groups: Scheduling group pool attached to this file. */
+ struct panthor_group_pool *groups;
+};
+
+int panthor_device_init(struct panthor_device *ptdev);
+void panthor_device_unplug(struct panthor_device *ptdev);
+
+/**
+ * panthor_device_schedule_reset() - Schedules a reset operation
+ */
+static inline void panthor_device_schedule_reset(struct panthor_device *ptdev)
+{
+ if (!atomic_cmpxchg(&ptdev->reset.pending, 0, 1) &&
+ atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE)
+ queue_work(ptdev->reset.wq, &ptdev->reset.work);
+}
+
+/**
+ * panthor_device_reset_is_pending() - Checks if a reset is pending.
+ *
+ * Return: true if a reset is pending, false otherwise.
+ */
+static inline bool panthor_device_reset_is_pending(struct panthor_device *ptdev)
+{
+ return atomic_read(&ptdev->reset.pending) != 0;
+}
+
+int panthor_device_mmap_io(struct panthor_device *ptdev,
+ struct vm_area_struct *vma);
+
+int panthor_device_resume(struct device *dev);
+int panthor_device_suspend(struct device *dev);
+
+enum drm_panthor_exception_type {
+ DRM_PANTHOR_EXCEPTION_OK = 0x00,
+ DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
+ DRM_PANTHOR_EXCEPTION_KABOOM = 0x05,
+ DRM_PANTHOR_EXCEPTION_EUREKA = 0x06,
+ DRM_PANTHOR_EXCEPTION_ACTIVE = 0x08,
+ DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
+ DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
+ DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
+ DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
+ DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
+ DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,
+ DRM_PANTHOR_EXCEPTION_CS_CALL_STACK_OVERFLOW = 0x4a,
+ DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT = 0x4b,
+ DRM_PANTHOR_EXCEPTION_INSTR_INVALID_PC = 0x50,
+ DRM_PANTHOR_EXCEPTION_INSTR_INVALID_ENC = 0x51,
+ DRM_PANTHOR_EXCEPTION_INSTR_BARRIER_FAULT = 0x55,
+ DRM_PANTHOR_EXCEPTION_DATA_INVALID_FAULT = 0x58,
+ DRM_PANTHOR_EXCEPTION_TILE_RANGE_FAULT = 0x59,
+ DRM_PANTHOR_EXCEPTION_ADDR_RANGE_FAULT = 0x5a,
+ DRM_PANTHOR_EXCEPTION_IMPRECISE_FAULT = 0x5b,
+ DRM_PANTHOR_EXCEPTION_OOM = 0x60,
+ DRM_PANTHOR_EXCEPTION_CSF_FW_INTERNAL_ERROR = 0x68,
+ DRM_PANTHOR_EXCEPTION_CSF_RES_EVICTION_TIMEOUT = 0x69,
+ DRM_PANTHOR_EXCEPTION_GPU_BUS_FAULT = 0x80,
+ DRM_PANTHOR_EXCEPTION_GPU_SHAREABILITY_FAULT = 0x88,
+ DRM_PANTHOR_EXCEPTION_SYS_SHAREABILITY_FAULT = 0x89,
+ DRM_PANTHOR_EXCEPTION_GPU_CACHEABILITY_FAULT = 0x8a,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_0 = 0xc0,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_1 = 0xc1,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_2 = 0xc2,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_3 = 0xc3,
+ DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_4 = 0xc4,
+ DRM_PANTHOR_EXCEPTION_PERM_FAULT_0 = 0xc8,
+ DRM_PANTHOR_EXCEPTION_PERM_FAULT_1 = 0xc9,
+ DRM_PANTHOR_EXCEPTION_PERM_FAULT_2 = 0xca,
+ DRM_PANTHOR_EXCEPTION_PERM_FAULT_3 = 0xcb,
+ DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_1 = 0xd9,
+ DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_2 = 0xda,
+ DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_3 = 0xdb,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_IN = 0xe0,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT0 = 0xe4,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT1 = 0xe5,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT2 = 0xe6,
+ DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT3 = 0xe7,
+ DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_0 = 0xe8,
+ DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_1 = 0xe9,
+ DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_2 = 0xea,
+ DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_3 = 0xeb,
+};
+
+/**
+ * panthor_exception_is_fault() - Checks if an exception is a fault.
+ *
+ * Return: true if the exception is a fault, false otherwise.
+ */
+static inline bool
+panthor_exception_is_fault(u32 exception_code)
+{
+ return exception_code > DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT;
+}
+
+const char *panthor_exception_name(struct panthor_device *ptdev,
+ u32 exception_code);
+
+/**
+ * PANTHOR_IRQ_HANDLER() - Define interrupt handlers and the interrupt
+ * registration function.
+ *
+ * The boilerplate needed to gracefully deal with shared interrupts is
+ * auto-generated. All you have to do is call PANTHOR_IRQ_HANDLER()
+ * just after the actual handler. The handler prototype is:
+ *
+ * void (*handler)(struct panthor_device *, u32 status);
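+ *
+ * As an illustrative sketch (instance names assumed, not taken from this
+ * patch), an IRQ whose registers use the GPU_ prefix would do:
+ *
+ *   static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status);
+ *   PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);
+ *
+ * which generates panthor_gpu_irq_raw_handler(), the threaded handler, the
+ * suspend/resume helpers and panthor_request_gpu_irq().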
+ */
+#define PANTHOR_IRQ_HANDLER(__name, __reg_prefix, __handler) \
+static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data) \
+{ \
+ struct panthor_irq *pirq = data; \
+ struct panthor_device *ptdev = pirq->ptdev; \
+ \
+ if (atomic_read(&pirq->suspended)) \
+ return IRQ_NONE; \
+ if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT)) \
+ return IRQ_NONE; \
+ \
+ gpu_write(ptdev, __reg_prefix ## _INT_MASK, 0); \
+ return IRQ_WAKE_THREAD; \
+} \
+ \
+static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *data) \
+{ \
+ struct panthor_irq *pirq = data; \
+ struct panthor_device *ptdev = pirq->ptdev; \
+ irqreturn_t ret = IRQ_NONE; \
+ \
+ while (true) { \
+ u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask; \
+ \
+ if (!status) \
+ break; \
+ \
+ gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status); \
+ \
+ __handler(ptdev, status); \
+ ret = IRQ_HANDLED; \
+ } \
+ \
+ if (!atomic_read(&pirq->suspended)) \
+ gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \
+ \
+ return ret; \
+} \
+ \
+static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq) \
+{ \
+ pirq->mask = 0; \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0); \
+ synchronize_irq(pirq->irq); \
+ atomic_set(&pirq->suspended, true); \
+} \
+ \
+static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask) \
+{ \
+ atomic_set(&pirq->suspended, false); \
+ pirq->mask = mask; \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask); \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask); \
+} \
+ \
+static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
+ struct panthor_irq *pirq, \
+ int irq, u32 mask) \
+{ \
+ pirq->ptdev = ptdev; \
+ pirq->irq = irq; \
+ panthor_ ## __name ## _irq_resume(pirq, mask); \
+ \
+ return devm_request_threaded_irq(ptdev->base.dev, irq, \
+ panthor_ ## __name ## _irq_raw_handler, \
+ panthor_ ## __name ## _irq_threaded_handler, \
+ IRQF_SHARED, KBUILD_MODNAME "-" # __name, \
+ pirq); \
+}
+
+extern struct workqueue_struct *panthor_cleanup_wq;
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
new file mode 100644
index 000000000000..b8a84f26b3ef
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -0,0 +1,1488 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panthor_drm.h>
+
+#include "panthor_device.h"
+#include "panthor_fw.h"
+#include "panthor_gem.h"
+#include "panthor_gpu.h"
+#include "panthor_heap.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+/**
+ * DOC: user <-> kernel object copy helpers.
+ */
+
+/**
+ * panthor_set_uobj() - Copy kernel object to user object.
+ * @usr_ptr: User pointer.
+ * @usr_size: Size of the user object.
+ * @min_size: Minimum size for this object.
+ * @kern_size: Size of the kernel object.
+ * @in: Address of the kernel object to copy.
+ *
+ * Helper automating kernel -> user object copies.
+ *
+ * Don't use this function directly, use PANTHOR_UOBJ_SET() instead.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 kern_size, const void *in)
+{
+ /* User size shouldn't be smaller than the minimal object size. */
+ if (usr_size < min_size)
+ return -EINVAL;
+
+ if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_size, kern_size)))
+ return -EFAULT;
+
+ /* When the kernel object is smaller than the user object, we fill the gap with
+ * zeros.
+ */
+ if (usr_size > kern_size &&
+ clear_user(u64_to_user_ptr(usr_ptr + kern_size), usr_size - kern_size)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_get_uobj_array() - Copy a user object array into a kernel accessible object array.
+ * @in: The object array to copy.
+ * @min_stride: Minimum array stride.
+ * @obj_size: Kernel object size.
+ *
+ * Helper automating user -> kernel object copies.
+ *
+ * Don't use this function directly, use PANTHOR_UOBJ_GET_ARRAY() instead.
+ *
+ * Return: newly allocated object array or an ERR_PTR on error.
+ */
+static void *
+panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
+ u32 obj_size)
+{
+ int ret = 0;
+ void *out_alloc;
+
+ /* User stride must be at least the minimum object size, otherwise it might
+ * lack useful information.
+ */
+ if (in->stride < min_stride)
+ return ERR_PTR(-EINVAL);
+
+ if (!in->count)
+ return NULL;
+
+ out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
+ if (!out_alloc)
+ return ERR_PTR(-ENOMEM);
+
+ if (obj_size == in->stride) {
+ /* Fast path when user/kernel have the same uAPI header version. */
+ if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
+ (unsigned long)obj_size * in->count))
+ ret = -EFAULT;
+ } else {
+ void __user *in_ptr = u64_to_user_ptr(in->array);
+ void *out_ptr = out_alloc;
+
+ /* If the sizes differ, we need to copy elements one by one. */
+ for (u32 i = 0; i < in->count; i++) {
+ ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
+ if (ret)
+ break;
+
+ out_ptr += obj_size;
+ in_ptr += in->stride;
+ }
+ }
+
+ if (ret) {
+ kvfree(out_alloc);
+ return ERR_PTR(ret);
+ }
+
+ return out_alloc;
+}
+
+/**
+ * PANTHOR_UOBJ_MIN_SIZE_INTERNAL() - Get the minimum user object size
+ * @_typename: Object type.
+ * @_last_mandatory_field: Last mandatory field.
+ *
+ * Get the minimum user object size based on the last mandatory field name,
+ * i.e. the name of the last field of the structure at the time this
+ * structure was added to the uAPI.
+ *
+ * Don't use directly, use PANTHOR_UOBJ_DECL() instead.
+ */
+#define PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
+ (offsetof(_typename, _last_mandatory_field) + \
+ sizeof(((_typename *)NULL)->_last_mandatory_field))
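+
+/*
+ * For example, PANTHOR_UOBJ_MIN_SIZE_INTERNAL(struct drm_panthor_sync_op,
+ * timeline_value) evaluates to the offset of the timeline_value field plus
+ * the size of that field, i.e. the structure size at the time timeline_value
+ * was its last member.
+ */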
+
+/**
+ * PANTHOR_UOBJ_DECL() - Declare a new uAPI object that is subject to
+ * evolution.
+ * @_typename: Object type.
+ * @_last_mandatory_field: Last mandatory field.
+ *
+ * Should be used to extend the PANTHOR_UOBJ_MIN_SIZE() list.
+ */
+#define PANTHOR_UOBJ_DECL(_typename, _last_mandatory_field) \
+ _typename : PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
+
+/**
+ * PANTHOR_UOBJ_MIN_SIZE() - Get the minimum size of a given uAPI object
+ * @_obj_name: Object to get the minimum size of.
+ *
+ * Don't use this macro directly, it's automatically called by
+ * PANTHOR_UOBJ_{SET,GET_ARRAY}().
+ */
+#define PANTHOR_UOBJ_MIN_SIZE(_obj_name) \
+ _Generic(_obj_name, \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs))
+
+/**
+ * PANTHOR_UOBJ_SET() - Copy a kernel object to a user object.
+ * @_dest_usr_ptr: User pointer to copy to.
+ * @_usr_size: Size of the user object.
+ * @_src_obj: Kernel object to copy (not a pointer).
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+#define PANTHOR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
+ panthor_set_uobj(_dest_usr_ptr, _usr_size, \
+ PANTHOR_UOBJ_MIN_SIZE(_src_obj), \
+ sizeof(_src_obj), &(_src_obj))
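+
+/*
+ * Hedged usage sketch (argument names are illustrative, not from this patch):
+ * copying the kernel-side GPU information back to userspace would look like
+ *
+ *   ret = PANTHOR_UOBJ_SET(args->info_ptr, args->info_size, ptdev->gpu_info);
+ *
+ * with the minimum accepted size inferred from the _Generic() list above.
+ */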
+
+/**
+ * PANTHOR_UOBJ_GET_ARRAY() - Copy a user object array to a kernel accessible
+ * object array.
+ * @_dest_array: Local variable that will hold the newly allocated kernel
+ * object array.
+ * @_uobj_array: The drm_panthor_obj_array object describing the user object
+ * array.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+#define PANTHOR_UOBJ_GET_ARRAY(_dest_array, _uobj_array) \
+ ({ \
+ typeof(_dest_array) _tmp; \
+ _tmp = panthor_get_uobj_array(_uobj_array, \
+ PANTHOR_UOBJ_MIN_SIZE((_dest_array)[0]), \
+ sizeof((_dest_array)[0])); \
+ if (!IS_ERR(_tmp)) \
+ _dest_array = _tmp; \
+ PTR_ERR_OR_ZERO(_tmp); \
+ })
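+
+/*
+ * Usage sketch (mirroring panthor_ioctl_group_submit() below):
+ *
+ *	struct drm_panthor_queue_submit *jobs_args;
+ *	int ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
+ *
+ * On success, the caller owns the returned array and must kvfree() it.
+ */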
+
+/**
+ * struct panthor_sync_signal - Represent a synchronization object point to attach
+ * our job fence to.
+ *
+ * This structure is here to keep track of fences that are currently bound to
+ * a specific syncobj point.
+ *
+ * At the beginning of a job submission, the fence
+ * is retrieved from the syncobj itself, and can be NULL if no fence was attached
+ * to this point.
+ *
+ * At the end, it points to the fence of the last job that had a
+ * %DRM_PANTHOR_SYNC_OP_SIGNAL on this syncobj.
+ *
+ * With jobs being submitted in batches, the fence might change several times during
+ * the process, allowing one job to wait on a job that's part of the same submission
+ * but appears earlier in the drm_panthor_group_submit::queue_submits array.
+ */
+struct panthor_sync_signal {
+ /** @node: list_head to track signal ops within a submit operation */
+ struct list_head node;
+
+ /** @handle: The syncobj handle. */
+ u32 handle;
+
+ /**
+ * @point: The syncobj point.
+ *
+ * Zero for regular syncobjs, and non-zero for timeline syncobjs.
+ */
+ u64 point;
+
+ /**
+ * @syncobj: The sync object pointed by @handle.
+ */
+ struct drm_syncobj *syncobj;
+
+ /**
+ * @chain: Chain object used to link the new fence to an existing
+ * timeline syncobj.
+ *
+	 * NULL for regular syncobjs, non-NULL for timeline syncobjs.
+ */
+ struct dma_fence_chain *chain;
+
+ /**
+ * @fence: The fence to assign to the syncobj or syncobj-point.
+ */
+ struct dma_fence *fence;
+};
+
+/**
+ * struct panthor_job_ctx - Job context
+ */
+struct panthor_job_ctx {
+ /** @job: The job that is about to be submitted to drm_sched. */
+ struct drm_sched_job *job;
+
+ /** @syncops: Array of sync operations. */
+ struct drm_panthor_sync_op *syncops;
+
+ /** @syncop_count: Number of sync operations. */
+ u32 syncop_count;
+};
+
+/**
+ * struct panthor_submit_ctx - Submission context
+ *
+ * Anything that's related to a submission (%DRM_IOCTL_PANTHOR_VM_BIND or
+ * %DRM_IOCTL_PANTHOR_GROUP_SUBMIT) is kept here, so we can automate the
+ * initialization and cleanup steps.
+ */
+struct panthor_submit_ctx {
+ /** @file: DRM file this submission happens on. */
+ struct drm_file *file;
+
+ /**
+ * @signals: List of struct panthor_sync_signal.
+ *
+ * %DRM_PANTHOR_SYNC_OP_SIGNAL operations will be recorded here,
+ * and %DRM_PANTHOR_SYNC_OP_WAIT will first check if an entry
+ * matching the syncobj+point exists before calling
+ * drm_syncobj_find_fence(). This allows us to describe dependencies
+ * existing between jobs that are part of the same batch.
+ */
+ struct list_head signals;
+
+ /** @jobs: Array of jobs. */
+ struct panthor_job_ctx *jobs;
+
+ /** @job_count: Number of entries in the @jobs array. */
+ u32 job_count;
+
+ /** @exec: drm_exec context used to acquire and prepare resv objects. */
+ struct drm_exec exec;
+};
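+
+/*
+ * The canonical life cycle of a context, as used by the submit ioctls below:
+ *
+ *	panthor_submit_ctx_init()
+ *	panthor_submit_ctx_add_job() for each job
+ *	panthor_submit_ctx_collect_jobs_signal_ops()
+ *	<resv preparation under drm_exec_until_all_locked()>
+ *	panthor_submit_ctx_add_deps_and_arm_jobs()
+ *	panthor_submit_ctx_push_jobs()
+ *	panthor_submit_ctx_cleanup()
+ */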
+
+#define PANTHOR_SYNC_OP_FLAGS_MASK \
+ (DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK | DRM_PANTHOR_SYNC_OP_SIGNAL)
+
+static bool sync_op_is_signal(const struct drm_panthor_sync_op *sync_op)
+{
+ return !!(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
+}
+
+static bool sync_op_is_wait(const struct drm_panthor_sync_op *sync_op)
+{
+ /* Note that DRM_PANTHOR_SYNC_OP_WAIT == 0 */
+ return !(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
+}
+
+/**
+ * panthor_check_sync_op() - Check drm_panthor_sync_op fields
+ * @sync_op: The sync operation to check.
+ *
+ * Return: 0 on success, -EINVAL otherwise.
+ */
+static int
+panthor_check_sync_op(const struct drm_panthor_sync_op *sync_op)
+{
+ u8 handle_type;
+
+ if (sync_op->flags & ~PANTHOR_SYNC_OP_FLAGS_MASK)
+ return -EINVAL;
+
+ handle_type = sync_op->flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK;
+ if (handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
+ handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ)
+ return -EINVAL;
+
+ if (handle_type == DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
+ sync_op->timeline_value != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * panthor_sync_signal_free() - Release resources and free a panthor_sync_signal object
+ * @sig_sync: Signal object to free.
+ */
+static void
+panthor_sync_signal_free(struct panthor_sync_signal *sig_sync)
+{
+ if (!sig_sync)
+ return;
+
+ drm_syncobj_put(sig_sync->syncobj);
+ dma_fence_chain_free(sig_sync->chain);
+ dma_fence_put(sig_sync->fence);
+ kfree(sig_sync);
+}
+
+/**
+ * panthor_submit_ctx_add_sync_signal() - Add a signal operation to a submit context
+ * @ctx: Context to add the signal operation to.
+ * @handle: Syncobj handle.
+ * @point: Syncobj point.
+ *
+ * Return: 0 on success, otherwise negative error value.
+ */
+static int
+panthor_submit_ctx_add_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
+{
+ struct panthor_sync_signal *sig_sync;
+ struct dma_fence *cur_fence;
+ int ret;
+
+ sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
+ if (!sig_sync)
+ return -ENOMEM;
+
+ sig_sync->handle = handle;
+ sig_sync->point = point;
+
+ if (point > 0) {
+ sig_sync->chain = dma_fence_chain_alloc();
+ if (!sig_sync->chain) {
+ ret = -ENOMEM;
+ goto err_free_sig_sync;
+ }
+ }
+
+ sig_sync->syncobj = drm_syncobj_find(ctx->file, handle);
+ if (!sig_sync->syncobj) {
+ ret = -EINVAL;
+ goto err_free_sig_sync;
+ }
+
+ /* Retrieve the current fence attached to that point. It's
+ * perfectly fine to get a NULL fence here, it just means there's
+ * no fence attached to that point yet.
+ */
+ if (!drm_syncobj_find_fence(ctx->file, handle, point, 0, &cur_fence))
+ sig_sync->fence = cur_fence;
+
+ list_add_tail(&sig_sync->node, &ctx->signals);
+
+ return 0;
+
+err_free_sig_sync:
+ panthor_sync_signal_free(sig_sync);
+ return ret;
+}
+
+/**
+ * panthor_submit_ctx_search_sync_signal() - Search an existing signal operation in a
+ * submit context.
+ * @ctx: Context to search the signal operation in.
+ * @handle: Syncobj handle.
+ * @point: Syncobj point.
+ *
+ * Return: A valid panthor_sync_signal object if found, NULL otherwise.
+ */
+static struct panthor_sync_signal *
+panthor_submit_ctx_search_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
+{
+ struct panthor_sync_signal *sig_sync;
+
+ list_for_each_entry(sig_sync, &ctx->signals, node) {
+ if (handle == sig_sync->handle && point == sig_sync->point)
+ return sig_sync;
+ }
+
+ return NULL;
+}
+
+/**
+ * panthor_submit_ctx_add_job() - Add a job to a submit context
+ * @ctx: Context to add the job to.
+ * @idx: Index of the job in the context.
+ * @job: Job to add.
+ * @syncs: Sync operations provided by userspace.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_add_job(struct panthor_submit_ctx *ctx, u32 idx,
+ struct drm_sched_job *job,
+ const struct drm_panthor_obj_array *syncs)
+{
+ int ret;
+
+ ctx->jobs[idx].job = job;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(ctx->jobs[idx].syncops, syncs);
+ if (ret)
+ return ret;
+
+ ctx->jobs[idx].syncop_count = syncs->count;
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_get_sync_signal() - Search signal operation and add one if none was found.
+ * @ctx: Context to search the signal operation in.
+ * @handle: Syncobj handle.
+ * @point: Syncobj point.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_get_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
+{
+ struct panthor_sync_signal *sig_sync;
+
+ sig_sync = panthor_submit_ctx_search_sync_signal(ctx, handle, point);
+ if (sig_sync)
+ return 0;
+
+ return panthor_submit_ctx_add_sync_signal(ctx, handle, point);
+}
+
+/**
+ * panthor_submit_ctx_update_job_sync_signal_fences() - Update fences
+ * on the signal operations specified by a job.
+ * @ctx: Context to search the signal operation in.
+ * @job_idx: Index of the job to operate on.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_update_job_sync_signal_fences(struct panthor_submit_ctx *ctx,
+ u32 job_idx)
+{
+ struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
+ struct panthor_device,
+ base);
+ struct dma_fence *done_fence = &ctx->jobs[job_idx].job->s_fence->finished;
+ const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
+ u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
+
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct dma_fence *old_fence;
+ struct panthor_sync_signal *sig_sync;
+
+ if (!sync_op_is_signal(&sync_ops[i]))
+ continue;
+
+ sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
+ sync_ops[i].timeline_value);
+ if (drm_WARN_ON(&ptdev->base, !sig_sync))
+ return -EINVAL;
+
+ old_fence = sig_sync->fence;
+ sig_sync->fence = dma_fence_get(done_fence);
+ dma_fence_put(old_fence);
+
+ if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_collect_job_signal_ops() - Iterate over all job signal operations
+ * and add them to the context.
+ * @ctx: Context to add the signal operations to.
+ * @job_idx: Index of the job to operate on.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_collect_job_signal_ops(struct panthor_submit_ctx *ctx,
+ u32 job_idx)
+{
+ const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
+ u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
+
+ for (u32 i = 0; i < sync_op_count; i++) {
+ int ret;
+
+ if (!sync_op_is_signal(&sync_ops[i]))
+ continue;
+
+ ret = panthor_check_sync_op(&sync_ops[i]);
+ if (ret)
+ return ret;
+
+ ret = panthor_submit_ctx_get_sync_signal(ctx,
+ sync_ops[i].handle,
+ sync_ops[i].timeline_value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_push_fences() - Iterate over the signal array, and for each entry, push
+ * the currently assigned fence to the associated syncobj.
+ * @ctx: Context to push fences on.
+ *
+ * This is the last step of a submission procedure, and is done once we know the submission
+ * is effective and job fences are guaranteed to be signaled in finite time.
+ */
+static void
+panthor_submit_ctx_push_fences(struct panthor_submit_ctx *ctx)
+{
+ struct panthor_sync_signal *sig_sync;
+
+ list_for_each_entry(sig_sync, &ctx->signals, node) {
+ if (sig_sync->chain) {
+ drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
+ sig_sync->fence, sig_sync->point);
+ sig_sync->chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
+ }
+ }
+}
+
+/**
+ * panthor_submit_ctx_add_sync_deps_to_job() - Add sync wait operations as
+ * job dependencies.
+ * @ctx: Submit context.
+ * @job_idx: Index of the job to operate on.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_add_sync_deps_to_job(struct panthor_submit_ctx *ctx,
+ u32 job_idx)
+{
+ struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
+ struct panthor_device,
+ base);
+ const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
+ struct drm_sched_job *job = ctx->jobs[job_idx].job;
+ u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
+ int ret = 0;
+
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct panthor_sync_signal *sig_sync;
+ struct dma_fence *fence;
+
+ if (!sync_op_is_wait(&sync_ops[i]))
+ continue;
+
+ ret = panthor_check_sync_op(&sync_ops[i]);
+ if (ret)
+ return ret;
+
+ sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
+ sync_ops[i].timeline_value);
+ if (sig_sync) {
+ if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
+ return -EINVAL;
+
+ fence = dma_fence_get(sig_sync->fence);
+ } else {
+ ret = drm_syncobj_find_fence(ctx->file, sync_ops[i].handle,
+ sync_ops[i].timeline_value,
+ 0, &fence);
+ if (ret)
+ return ret;
+ }
+
+ ret = drm_sched_job_add_dependency(job, fence);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_collect_jobs_signal_ops() - Collect all signal operations
+ * and add them to the submit context.
+ * @ctx: Submit context.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_collect_jobs_signal_ops(struct panthor_submit_ctx *ctx)
+{
+ for (u32 i = 0; i < ctx->job_count; i++) {
+ int ret;
+
+ ret = panthor_submit_ctx_collect_job_signal_ops(ctx, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_add_deps_and_arm_jobs() - Add jobs dependencies and arm jobs
+ * @ctx: Submit context.
+ *
+ * Must be called after the resv preparation has been taken care of.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+panthor_submit_ctx_add_deps_and_arm_jobs(struct panthor_submit_ctx *ctx)
+{
+ for (u32 i = 0; i < ctx->job_count; i++) {
+ int ret;
+
+ ret = panthor_submit_ctx_add_sync_deps_to_job(ctx, i);
+ if (ret)
+ return ret;
+
+ drm_sched_job_arm(ctx->jobs[i].job);
+
+ ret = panthor_submit_ctx_update_job_sync_signal_fences(ctx, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_push_jobs() - Push jobs to their scheduling entities.
+ * @ctx: Submit context.
+ * @upd_resvs: Callback used to update reservation objects that were previously
+ * prepared.
+ */
+static void
+panthor_submit_ctx_push_jobs(struct panthor_submit_ctx *ctx,
+ void (*upd_resvs)(struct drm_exec *, struct drm_sched_job *))
+{
+ for (u32 i = 0; i < ctx->job_count; i++) {
+ upd_resvs(&ctx->exec, ctx->jobs[i].job);
+ drm_sched_entity_push_job(ctx->jobs[i].job);
+
+ /* Job is owned by the scheduler now. */
+ ctx->jobs[i].job = NULL;
+ }
+
+ panthor_submit_ctx_push_fences(ctx);
+}
+
+/**
+ * panthor_submit_ctx_init() - Initializes a submission context
+ * @ctx: Submit context to initialize.
+ * @file: drm_file this submission happens on.
+ * @job_count: Number of jobs that will be submitted.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int panthor_submit_ctx_init(struct panthor_submit_ctx *ctx,
+ struct drm_file *file, u32 job_count)
+{
+ ctx->jobs = kvmalloc_array(job_count, sizeof(*ctx->jobs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ctx->jobs)
+ return -ENOMEM;
+
+ ctx->file = file;
+ ctx->job_count = job_count;
+ INIT_LIST_HEAD(&ctx->signals);
+ drm_exec_init(&ctx->exec,
+ DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES,
+ 0);
+ return 0;
+}
+
+/**
+ * panthor_submit_ctx_cleanup() - Cleanup a submission context
+ * @ctx: Submit context to cleanup.
+ * @job_put: Job put callback.
+ */
+static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx,
+ void (*job_put)(struct drm_sched_job *))
+{
+ struct panthor_sync_signal *sig_sync, *tmp;
+ unsigned long i;
+
+ drm_exec_fini(&ctx->exec);
+
+ list_for_each_entry_safe(sig_sync, tmp, &ctx->signals, node)
+ panthor_sync_signal_free(sig_sync);
+
+ for (i = 0; i < ctx->job_count; i++) {
+ job_put(ctx->jobs[i].job);
+ kvfree(ctx->jobs[i].syncops);
+ }
+
+ kvfree(ctx->jobs);
+}
+
+static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ struct drm_panthor_dev_query *args = data;
+
+ if (!args->pointer) {
+ switch (args->type) {
+ case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
+ args->size = sizeof(ptdev->gpu_info);
+ return 0;
+
+ case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
+ args->size = sizeof(ptdev->csif_info);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (args->type) {
+ case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
+ return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);
+
+ case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
+ return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#define PANTHOR_VM_CREATE_FLAGS 0
+
+static int panthor_ioctl_vm_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_create *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ ret = panthor_vm_pool_create_vm(ptdev, pfile->vms, args);
+ if (ret >= 0) {
+ args->id = ret;
+ ret = 0;
+ }
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_ioctl_vm_destroy(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_destroy *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ return panthor_vm_pool_destroy_vm(pfile->vms, args->id);
+}
+
+#define PANTHOR_BO_FLAGS DRM_PANTHOR_BO_NO_MMAP
+
+static int panthor_ioctl_bo_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_bo_create *args = data;
+ struct panthor_vm *vm = NULL;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (!args->size || args->pad ||
+ (args->flags & ~PANTHOR_BO_FLAGS)) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+
+ if (args->exclusive_vm_id) {
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->exclusive_vm_id);
+ if (!vm) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+ }
+
+ ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size,
+ args->flags, &args->handle);
+
+ panthor_vm_put(vm);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panthor_bo_mmap_offset *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+out:
+ drm_gem_object_put(obj);
+ return ret;
+}
+
+static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_group_submit *args = data;
+ struct drm_panthor_queue_submit *jobs_args;
+ struct panthor_submit_ctx ctx;
+ int ret = 0, cookie;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
+ if (ret)
+ goto out_dev_exit;
+
+ ret = panthor_submit_ctx_init(&ctx, file, args->queue_submits.count);
+ if (ret)
+ goto out_free_jobs_args;
+
+ /* Create jobs and attach sync operations */
+ for (u32 i = 0; i < args->queue_submits.count; i++) {
+ const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
+ struct drm_sched_job *job;
+
+ job = panthor_job_create(pfile, args->group_handle, qsubmit);
+ if (IS_ERR(job)) {
+ ret = PTR_ERR(job);
+ goto out_cleanup_submit_ctx;
+ }
+
+ ret = panthor_submit_ctx_add_job(&ctx, i, job, &qsubmit->syncs);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+ }
+
+ /*
+ * Collect signal operations on all jobs, such that each job can pick
+ * from it for its dependencies and update the fence to signal when the
+ * job is submitted.
+ */
+ ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+
+ /*
+	 * We acquire/prepare resvs on all jobs before proceeding with the
+ * dependency registration.
+ *
+ * This is solving two problems:
+ * 1. drm_sched_job_arm() and drm_sched_entity_push_job() must be
+ * protected by a lock to make sure no concurrent access to the same
+	 *    entity gets interleaved, which would mess up the fence seqno
+ * ordering. Luckily, one of the resv being acquired is the VM resv,
+ * and a scheduling entity is only bound to a single VM. As soon as
+ * we acquire the VM resv, we should be safe.
+ * 2. Jobs might depend on fences that were issued by previous jobs in
+ * the same batch, so we can't add dependencies on all jobs before
+ * arming previous jobs and registering the fence to the signal
+ * array, otherwise we might miss dependencies, or point to an
+ * outdated fence.
+ */
+ if (args->queue_submits.count > 0) {
+ /* All jobs target the same group, so they also point to the same VM. */
+ struct panthor_vm *vm = panthor_job_vm(ctx.jobs[0].job);
+
+ drm_exec_until_all_locked(&ctx.exec) {
+ ret = panthor_vm_prepare_mapped_bos_resvs(&ctx.exec, vm,
+ args->queue_submits.count);
+ }
+
+ if (ret)
+ goto out_cleanup_submit_ctx;
+ }
+
+ /*
+ * Now that resvs are locked/prepared, we can iterate over each job to
+ * add the dependencies, arm the job fence, register the job fence to
+ * the signal array.
+ */
+ ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+
+ /* Nothing can fail after that point, so we can make our job fences
+ * visible to the outside world. Push jobs and set the job fences to
+ * the resv slots we reserved. This also pushes the fences to the
+ * syncobjs that are part of the signal array.
+ */
+ panthor_submit_ctx_push_jobs(&ctx, panthor_job_update_resvs);
+
+out_cleanup_submit_ctx:
+ panthor_submit_ctx_cleanup(&ctx, panthor_job_put);
+
+out_free_jobs_args:
+ kvfree(jobs_args);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_group_destroy *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ return panthor_group_destroy(pfile, args->group_handle);
+}
+
+static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_group_create *args = data;
+ struct drm_panthor_queue_create *queue_args;
+ int ret;
+
+ if (!args->queues.count)
+ return -EINVAL;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
+ if (ret)
+ return ret;
+
+ ret = panthor_group_create(pfile, args, queue_args);
+ if (ret >= 0) {
+ args->group_handle = ret;
+ ret = 0;
+ }
+
+ kvfree(queue_args);
+ return ret;
+}
+
+static int panthor_ioctl_group_get_state(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_group_get_state *args = data;
+
+ return panthor_group_get_state(pfile, args);
+}
+
+static int panthor_ioctl_tiler_heap_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_tiler_heap_create *args = data;
+ struct panthor_heap_pool *pool;
+ struct panthor_vm *vm;
+ int ret;
+
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
+ if (!vm)
+ return -EINVAL;
+
+ pool = panthor_vm_get_heap_pool(vm, true);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto out_put_vm;
+ }
+
+ ret = panthor_heap_create(pool,
+ args->initial_chunk_count,
+ args->chunk_size,
+ args->max_chunks,
+ args->target_in_flight,
+ &args->tiler_heap_ctx_gpu_va,
+ &args->first_heap_chunk_gpu_va);
+ if (ret < 0)
+ goto out_put_heap_pool;
+
+ /* Heap pools are per-VM. We combine the VM and HEAP id to make
+ * a unique heap handle.
+ */
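+	/* The destroy path reverses this encoding: vm_id = handle >> 16,
+	 * heap_id = handle & GENMASK(15, 0).
+	 */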
+ args->handle = (args->vm_id << 16) | ret;
+ ret = 0;
+
+out_put_heap_pool:
+ panthor_heap_pool_put(pool);
+
+out_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+static int panthor_ioctl_tiler_heap_destroy(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_tiler_heap_destroy *args = data;
+ struct panthor_heap_pool *pool;
+ struct panthor_vm *vm;
+ int ret;
+
+ if (args->pad)
+ return -EINVAL;
+
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->handle >> 16);
+ if (!vm)
+ return -EINVAL;
+
+ pool = panthor_vm_get_heap_pool(vm, false);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto out_put_vm;
+ }
+
+ ret = panthor_heap_destroy(pool, args->handle & GENMASK(15, 0));
+ panthor_heap_pool_put(pool);
+
+out_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+static int panthor_ioctl_vm_bind_async(struct drm_device *ddev,
+ struct drm_panthor_vm_bind *args,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_bind_op *jobs_args;
+ struct panthor_submit_ctx ctx;
+ struct panthor_vm *vm;
+ int ret = 0;
+
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
+ if (!vm)
+ return -EINVAL;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
+ if (ret)
+ goto out_put_vm;
+
+ ret = panthor_submit_ctx_init(&ctx, file, args->ops.count);
+ if (ret)
+ goto out_free_jobs_args;
+
+ for (u32 i = 0; i < args->ops.count; i++) {
+ struct drm_panthor_vm_bind_op *op = &jobs_args[i];
+ struct drm_sched_job *job;
+
+ job = panthor_vm_bind_job_create(file, vm, op);
+ if (IS_ERR(job)) {
+ ret = PTR_ERR(job);
+ goto out_cleanup_submit_ctx;
+ }
+
+ ret = panthor_submit_ctx_add_job(&ctx, i, job, &op->syncs);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+ }
+
+ ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+
+ /* Prepare reservation objects for each VM_BIND job. */
+ drm_exec_until_all_locked(&ctx.exec) {
+ for (u32 i = 0; i < ctx.job_count; i++) {
+ ret = panthor_vm_bind_job_prepare_resvs(&ctx.exec, ctx.jobs[i].job);
+ drm_exec_retry_on_contention(&ctx.exec);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+ }
+ }
+
+ ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
+ if (ret)
+ goto out_cleanup_submit_ctx;
+
+ /* Nothing can fail after that point. */
+ panthor_submit_ctx_push_jobs(&ctx, panthor_vm_bind_job_update_resvs);
+
+out_cleanup_submit_ctx:
+ panthor_submit_ctx_cleanup(&ctx, panthor_vm_bind_job_put);
+
+out_free_jobs_args:
+ kvfree(jobs_args);
+
+out_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+static int panthor_ioctl_vm_bind_sync(struct drm_device *ddev,
+ struct drm_panthor_vm_bind *args,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_bind_op *jobs_args;
+ struct panthor_vm *vm;
+ int ret;
+
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
+ if (!vm)
+ return -EINVAL;
+
+ ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
+ if (ret)
+ goto out_put_vm;
+
+ for (u32 i = 0; i < args->ops.count; i++) {
+ ret = panthor_vm_bind_exec_sync_op(file, vm, &jobs_args[i]);
+ if (ret) {
+ /* Update ops.count so the user knows where things failed. */
+ args->ops.count = i;
+ break;
+ }
+ }
+
+ kvfree(jobs_args);
+
+out_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+#define PANTHOR_VM_BIND_FLAGS DRM_PANTHOR_VM_BIND_ASYNC
+
+static int panthor_ioctl_vm_bind(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panthor_vm_bind *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (args->flags & DRM_PANTHOR_VM_BIND_ASYNC)
+ ret = panthor_ioctl_vm_bind_async(ddev, args, file);
+ else
+ ret = panthor_ioctl_vm_bind_sync(ddev, args, file);
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_panthor_vm_get_state *args = data;
+ struct panthor_vm *vm;
+
+ vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
+ if (!vm)
+ return -EINVAL;
+
+ if (panthor_vm_is_unusable(vm))
+ args->state = DRM_PANTHOR_VM_STATE_UNUSABLE;
+ else
+ args->state = DRM_PANTHOR_VM_STATE_USABLE;
+
+ panthor_vm_put(vm);
+ return 0;
+}
+
+static int
+panthor_open(struct drm_device *ddev, struct drm_file *file)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ struct panthor_file *pfile;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
+ if (!pfile) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+
+ pfile->ptdev = ptdev;
+
+ ret = panthor_vm_pool_create(pfile);
+ if (ret)
+ goto err_free_file;
+
+ ret = panthor_group_pool_create(pfile);
+ if (ret)
+ goto err_destroy_vm_pool;
+
+ file->driver_priv = pfile;
+ return 0;
+
+err_destroy_vm_pool:
+ panthor_vm_pool_destroy(pfile);
+
+err_free_file:
+ kfree(pfile);
+
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void
+panthor_postclose(struct drm_device *ddev, struct drm_file *file)
+{
+ struct panthor_file *pfile = file->driver_priv;
+
+ panthor_group_pool_destroy(pfile);
+ panthor_vm_pool_destroy(pfile);
+
+ kfree(pfile);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
+#define PANTHOR_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(PANTHOR_##n, panthor_ioctl_##func, flags)
+
+ PANTHOR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(VM_CREATE, vm_create, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(VM_DESTROY, vm_destroy, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(VM_BIND, vm_bind, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(VM_GET_STATE, vm_get_state, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(BO_CREATE, bo_create, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(GROUP_CREATE, group_create, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(GROUP_DESTROY, group_destroy, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(GROUP_GET_STATE, group_get_state, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
+};
+
+static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file = filp->private_data;
+ struct panthor_file *pfile = file->driver_priv;
+ struct panthor_device *ptdev = pfile->ptdev;
+ u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ int ret, cookie;
+
+ if (!drm_dev_enter(file->minor->dev, &cookie))
+ return -ENODEV;
+
+#ifdef CONFIG_ARM64
+ /*
+ * With 32-bit systems being limited by the 32-bit representation of
+ * mmap2's pgoffset field, we need to make the MMIO offset arch
+ * specific. This converts a user MMIO offset into something the kernel
+ * driver understands.
+ */
+ if (test_tsk_thread_flag(current, TIF_32BIT) &&
+ offset >= DRM_PANTHOR_USER_MMIO_OFFSET_32BIT) {
+ offset += DRM_PANTHOR_USER_MMIO_OFFSET_64BIT -
+ DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+ vma->vm_pgoff = offset >> PAGE_SHIFT;
+ }
+#endif
+
+ if (offset >= DRM_PANTHOR_USER_MMIO_OFFSET)
+ ret = panthor_device_mmap_io(ptdev, vma);
+ else
+ ret = drm_gem_mmap(filp, vma);
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static const struct file_operations panthor_drm_driver_fops = {
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = panthor_mmap,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static void panthor_debugfs_init(struct drm_minor *minor)
+{
+ panthor_mmu_debugfs_init(minor);
+}
+#endif
+
+/*
+ * PanCSF driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver panthor_drm_driver = {
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
+ .open = panthor_open,
+ .postclose = panthor_postclose,
+ .ioctls = panthor_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
+ .fops = &panthor_drm_driver_fops,
+ .name = "panthor",
+ .desc = "Panthor DRM driver",
+ .date = "20230801",
+ .major = 1,
+ .minor = 0,
+
+ .gem_create_object = panthor_gem_create_object,
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = panthor_debugfs_init,
+#endif
+};
+
+static int panthor_probe(struct platform_device *pdev)
+{
+ struct panthor_device *ptdev;
+
+ ptdev = devm_drm_dev_alloc(&pdev->dev, &panthor_drm_driver,
+ struct panthor_device, base);
+ if (IS_ERR(ptdev))
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ptdev);
+
+ return panthor_device_init(ptdev);
+}
+
+static void panthor_remove(struct platform_device *pdev)
+{
+ struct panthor_device *ptdev = platform_get_drvdata(pdev);
+
+ panthor_device_unplug(ptdev);
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "rockchip,rk3588-mali" },
+ { .compatible = "arm,mali-valhall-csf" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static DEFINE_RUNTIME_DEV_PM_OPS(panthor_pm_ops,
+ panthor_device_suspend,
+ panthor_device_resume,
+ NULL);
+
+static struct platform_driver panthor_driver = {
+ .probe = panthor_probe,
+ .remove_new = panthor_remove,
+ .driver = {
+ .name = "panthor",
+ .pm = pm_ptr(&panthor_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+
+/*
+ * Workqueue used to clean up driver resources.
+ *
+ * We create a dedicated workqueue so we can drain on unplug and
+ * make sure all resources are freed before the module is unloaded.
+ */
+struct workqueue_struct *panthor_cleanup_wq;
+
+static int __init panthor_init(void)
+{
+ int ret;
+
+ ret = panthor_mmu_pt_cache_init();
+ if (ret)
+ return ret;
+
+ panthor_cleanup_wq = alloc_workqueue("panthor-cleanup", WQ_UNBOUND, 0);
+ if (!panthor_cleanup_wq) {
+		pr_err("panthor: Failed to allocate the workqueues\n");
+ ret = -ENOMEM;
+ goto err_mmu_pt_cache_fini;
+ }
+
+ ret = platform_driver_register(&panthor_driver);
+ if (ret)
+ goto err_destroy_cleanup_wq;
+
+ return 0;
+
+err_destroy_cleanup_wq:
+ destroy_workqueue(panthor_cleanup_wq);
+
+err_mmu_pt_cache_fini:
+ panthor_mmu_pt_cache_fini();
+ return ret;
+}
+module_init(panthor_init);
+
+static void __exit panthor_exit(void)
+{
+ platform_driver_unregister(&panthor_driver);
+ destroy_workqueue(panthor_cleanup_wq);
+ panthor_mmu_pt_cache_fini();
+}
+module_exit(panthor_exit);
+
+MODULE_AUTHOR("Panthor Project Developers");
+MODULE_DESCRIPTION("Panthor DRM Driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
new file mode 100644
index 000000000000..181395e2859a
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -0,0 +1,1362 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2023 Collabora ltd. */
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+#include <asm/arch_timer.h>
+#endif
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/iosys-map.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+
+#include "panthor_device.h"
+#include "panthor_fw.h"
+#include "panthor_gem.h"
+#include "panthor_gpu.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+#define CSF_FW_NAME "mali_csffw.bin"
+
+#define PING_INTERVAL_MS 12000
+#define PROGRESS_TIMEOUT_CYCLES (5ull * 500 * 1024 * 1024)
+#define PROGRESS_TIMEOUT_SCALE_SHIFT 10
+#define IDLE_HYSTERESIS_US 800
+#define PWROFF_HYSTERESIS_US 10000
+
+/**
+ * struct panthor_fw_binary_hdr - Firmware binary header.
+ */
+struct panthor_fw_binary_hdr {
+ /** @magic: Magic value to check binary validity. */
+ u32 magic;
+#define CSF_FW_BINARY_HEADER_MAGIC 0xc3f13a6e
+
+ /** @minor: Minor FW version. */
+ u8 minor;
+
+ /** @major: Major FW version. */
+ u8 major;
+#define CSF_FW_BINARY_HEADER_MAJOR_MAX 0
+
+ /** @padding1: MBZ. */
+ u16 padding1;
+
+ /** @version_hash: FW version hash. */
+ u32 version_hash;
+
+ /** @padding2: MBZ. */
+ u32 padding2;
+
+ /** @size: FW binary size. */
+ u32 size;
+};
+
+/**
+ * enum panthor_fw_binary_entry_type - Firmware binary entry type
+ */
+enum panthor_fw_binary_entry_type {
+ /** @CSF_FW_BINARY_ENTRY_TYPE_IFACE: Host <-> FW interface. */
+ CSF_FW_BINARY_ENTRY_TYPE_IFACE = 0,
+
+ /** @CSF_FW_BINARY_ENTRY_TYPE_CONFIG: FW config. */
+ CSF_FW_BINARY_ENTRY_TYPE_CONFIG = 1,
+
+ /** @CSF_FW_BINARY_ENTRY_TYPE_FUTF_TEST: Unit-tests. */
+ CSF_FW_BINARY_ENTRY_TYPE_FUTF_TEST = 2,
+
+ /** @CSF_FW_BINARY_ENTRY_TYPE_TRACE_BUFFER: Trace buffer interface. */
+ CSF_FW_BINARY_ENTRY_TYPE_TRACE_BUFFER = 3,
+
+ /** @CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA: Timeline metadata interface. */
+ CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA = 4,
+};
+
+#define CSF_FW_BINARY_ENTRY_TYPE(ehdr) ((ehdr) & 0xff)
+#define CSF_FW_BINARY_ENTRY_SIZE(ehdr) (((ehdr) >> 8) & 0xff)
+#define CSF_FW_BINARY_ENTRY_UPDATE BIT(30)
+#define CSF_FW_BINARY_ENTRY_OPTIONAL BIT(31)
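+
+/* An illustrative decode: ehdr == 0x00002001 is a type 1 (CONFIG) entry
+ * that is 0x20 bytes long, with neither the UPDATE nor OPTIONAL flag set.
+ */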
+
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_RD BIT(0)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_WR BIT(1)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_EX BIT(2)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_NONE (0 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED (1 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED_COHERENT (3 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK GENMASK(4, 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_PROT BIT(5)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED BIT(30)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO BIT(31)
+
+#define CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS \
+ (CSF_FW_BINARY_IFACE_ENTRY_RD_RD | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_WR | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_EX | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_PROT | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED | \
+ CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO)
+
+/**
+ * struct panthor_fw_binary_section_entry_hdr - Describes a section of FW binary
+ */
+struct panthor_fw_binary_section_entry_hdr {
+ /** @flags: Section flags. */
+ u32 flags;
+
+ /** @va: MCU virtual range to map this binary section to. */
+ struct {
+ /** @start: Start address. */
+ u32 start;
+
+ /** @end: End address. */
+ u32 end;
+ } va;
+
+ /** @data: Data to initialize the FW section with. */
+ struct {
+ /** @start: Start offset in the FW binary. */
+ u32 start;
+
+ /** @end: End offset in the FW binary. */
+ u32 end;
+ } data;
+};
+
+/**
+ * struct panthor_fw_binary_iter - Firmware binary iterator
+ *
+ * Used to parse a firmware binary.
+ */
+struct panthor_fw_binary_iter {
+ /** @data: FW binary data. */
+ const void *data;
+
+ /** @size: FW binary size. */
+ size_t size;
+
+ /** @offset: Iterator offset. */
+ size_t offset;
+};
+
+/**
+ * struct panthor_fw_section - FW section
+ */
+struct panthor_fw_section {
+ /** @node: Used to keep track of FW sections. */
+ struct list_head node;
+
+ /** @flags: Section flags, as encoded in the FW binary. */
+ u32 flags;
+
+ /** @mem: Section memory. */
+ struct panthor_kernel_bo *mem;
+
+ /**
+ * @name: Name of the section, as specified in the binary.
+ *
+ * Can be NULL.
+ */
+ const char *name;
+
+ /**
+ * @data: Initial data copied to the FW memory.
+ *
+ * We keep data around so we can reload sections after a reset.
+ */
+ struct {
+		/** @buf: Buffer used to store init data. */
+ const void *buf;
+
+ /** @size: Size of @buf in bytes. */
+ size_t size;
+ } data;
+};
+
+#define CSF_MCU_SHARED_REGION_START 0x04000000ULL
+#define CSF_MCU_SHARED_REGION_SIZE 0x04000000ULL
+
+#define MIN_CS_PER_CSG 8
+#define MIN_CSGS 3
+#define MAX_CSG_PRIO 0xf
+
+#define CSF_IFACE_VERSION(major, minor, patch) \
+ (((major) << 24) | ((minor) << 16) | (patch))
+#define CSF_IFACE_VERSION_MAJOR(v) ((v) >> 24)
+#define CSF_IFACE_VERSION_MINOR(v) (((v) >> 16) & 0xff)
+#define CSF_IFACE_VERSION_PATCH(v) ((v) & 0xffff)
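+
+/* For instance, CSF_IFACE_VERSION(1, 1, 0) packs to 0x01010000, and the
+ * MAJOR/MINOR/PATCH helpers extract the individual fields back out.
+ */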
+
+#define CSF_GROUP_CONTROL_OFFSET 0x1000
+#define CSF_STREAM_CONTROL_OFFSET 0x40
+#define CSF_UNPRESERVED_REG_COUNT 4
+
+/**
+ * struct panthor_fw_iface - FW interfaces
+ */
+struct panthor_fw_iface {
+ /** @global: Global interface. */
+ struct panthor_fw_global_iface global;
+
+ /** @groups: Group slot interfaces. */
+ struct panthor_fw_csg_iface groups[MAX_CSGS];
+
+ /** @streams: Command stream slot interfaces. */
+ struct panthor_fw_cs_iface streams[MAX_CSGS][MAX_CS_PER_CSG];
+};
+
+/**
+ * struct panthor_fw - Firmware management
+ */
+struct panthor_fw {
+ /** @vm: MCU VM. */
+ struct panthor_vm *vm;
+
+ /** @sections: List of FW sections. */
+ struct list_head sections;
+
+ /** @shared_section: The section containing the FW interfaces. */
+ struct panthor_fw_section *shared_section;
+
+ /** @iface: FW interfaces. */
+ struct panthor_fw_iface iface;
+
+ /** @watchdog: Collection of fields relating to the FW watchdog. */
+ struct {
+ /** @ping_work: Delayed work used to ping the FW. */
+ struct delayed_work ping_work;
+ } watchdog;
+
+ /**
+ * @req_waitqueue: FW request waitqueue.
+ *
+	 * Every time a request is sent to a command stream group or the global
+	 * interface, the caller will first busy wait for the request to be
+	 * acknowledged, and then fall back to a sleeping wait.
+ *
+ * This wait queue is here to support the sleeping wait flavor.
+ */
+ wait_queue_head_t req_waitqueue;
+
+	/** @booted: True if the FW is booted. */
+ bool booted;
+
+ /**
+ * @fast_reset: True if the post_reset logic can proceed with a fast reset.
+ *
+ * A fast reset is just a reset where the driver doesn't reload the FW sections.
+ *
+ * Any time the firmware is properly suspended, a fast reset can take place.
+ * On the other hand, if the halt operation failed, the driver will reload
+ * all sections to make sure we start from a fresh state.
+ */
+ bool fast_reset;
+
+ /** @irq: Job irq data. */
+ struct panthor_irq irq;
+};
+
+struct panthor_vm *panthor_fw_vm(struct panthor_device *ptdev)
+{
+ return ptdev->fw->vm;
+}
+
+/**
+ * panthor_fw_get_glb_iface() - Get the global interface
+ * @ptdev: Device.
+ *
+ * Return: The global interface.
+ */
+struct panthor_fw_global_iface *
+panthor_fw_get_glb_iface(struct panthor_device *ptdev)
+{
+ return &ptdev->fw->iface.global;
+}
+
+/**
+ * panthor_fw_get_csg_iface() - Get a command stream group slot interface
+ * @ptdev: Device.
+ * @csg_slot: Index of the command stream group slot.
+ *
+ * Return: The command stream group slot interface.
+ */
+struct panthor_fw_csg_iface *
+panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot)
+{
+ if (drm_WARN_ON(&ptdev->base, csg_slot >= MAX_CSGS))
+ return NULL;
+
+ return &ptdev->fw->iface.groups[csg_slot];
+}
+
+/**
+ * panthor_fw_get_cs_iface() - Get a command stream slot interface
+ * @ptdev: Device.
+ * @csg_slot: Index of the command stream group slot.
+ * @cs_slot: Index of the command stream slot.
+ *
+ * Return: The command stream slot interface.
+ */
+struct panthor_fw_cs_iface *
+panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot)
+{
+ if (drm_WARN_ON(&ptdev->base, csg_slot >= MAX_CSGS || cs_slot >= MAX_CS_PER_CSG))
+ return NULL;
+
+ return &ptdev->fw->iface.streams[csg_slot][cs_slot];
+}
+
+/**
+ * panthor_fw_conv_timeout() - Convert a timeout into a cycle-count
+ * @ptdev: Device.
+ * @timeout_us: Timeout expressed in micro-seconds.
+ *
+ * The FW has two timer sources: the GPU counter or the arch-timer. We need
+ * to express timeouts in terms of a number of cycles, and specify which
+ * timer source should be used.
+ *
+ * Return: A value suitable for timeout fields in the global interface.
+ */
+static u32 panthor_fw_conv_timeout(struct panthor_device *ptdev, u32 timeout_us)
+{
+ bool use_cycle_counter = false;
+ u32 timer_rate = 0;
+ u64 mod_cycles;
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+ timer_rate = arch_timer_get_cntfrq();
+#endif
+
+ if (!timer_rate) {
+ use_cycle_counter = true;
+ timer_rate = clk_get_rate(ptdev->clks.core);
+ }
+
+ if (drm_WARN_ON(&ptdev->base, !timer_rate)) {
+ /* We couldn't get a valid clock rate, let's just pick the
+ * maximum value so the FW still handles the core
+ * power on/off requests.
+ */
+ return GLB_TIMER_VAL(~0) |
+ GLB_TIMER_SOURCE_GPU_COUNTER;
+ }
+
+ mod_cycles = DIV_ROUND_UP_ULL((u64)timeout_us * timer_rate,
+ 1000000ull << 10);
+ if (drm_WARN_ON(&ptdev->base, mod_cycles > GLB_TIMER_VAL(~0)))
+ mod_cycles = GLB_TIMER_VAL(~0);
+
+ return GLB_TIMER_VAL(mod_cycles) |
+ (use_cycle_counter ? GLB_TIMER_SOURCE_GPU_COUNTER : 0);
+}
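+
+/* A worked example, assuming a (hypothetical) 19.2 MHz timer source and
+ * timeout_us = 10000: mod_cycles = DIV_ROUND_UP(10000 * 19200000,
+ * 1000000 << 10) = 188, i.e. the timeout is encoded in units of 1024
+ * timer cycles.
+ */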
+
+static int panthor_fw_binary_iter_read(struct panthor_device *ptdev,
+ struct panthor_fw_binary_iter *iter,
+ void *out, size_t size)
+{
+ size_t new_offset = iter->offset + size;
+
+ if (new_offset > iter->size || new_offset < iter->offset) {
+ drm_err(&ptdev->base, "Firmware too small\n");
+ return -EINVAL;
+ }
+
+ memcpy(out, iter->data + iter->offset, size);
+ iter->offset = new_offset;
+ return 0;
+}
+
+static int panthor_fw_binary_sub_iter_init(struct panthor_device *ptdev,
+ struct panthor_fw_binary_iter *iter,
+ struct panthor_fw_binary_iter *sub_iter,
+ size_t size)
+{
+ size_t new_offset = iter->offset + size;
+
+ if (new_offset > iter->size || new_offset < iter->offset) {
+ drm_err(&ptdev->base, "Firmware entry too long\n");
+ return -EINVAL;
+ }
+
+ sub_iter->offset = 0;
+ sub_iter->data = iter->data + iter->offset;
+ sub_iter->size = size;
+ iter->offset = new_offset;
+ return 0;
+}
+
+static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
+ struct panthor_fw_section *section)
+{
+ bool was_mapped = !!section->mem->kmap;
+ int ret;
+
+ if (!section->data.size &&
+ !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO))
+ return;
+
+ ret = panthor_kernel_bo_vmap(section->mem);
+ if (drm_WARN_ON(&ptdev->base, ret))
+ return;
+
+ memcpy(section->mem->kmap, section->data.buf, section->data.size);
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO) {
+ memset(section->mem->kmap + section->data.size, 0,
+ panthor_kernel_bo_size(section->mem) - section->data.size);
+ }
+
+ if (!was_mapped)
+ panthor_kernel_bo_vunmap(section->mem);
+}
+
+/**
+ * panthor_fw_alloc_queue_iface_mem() - Allocate memory for the ring-buffer interfaces.
+ * @ptdev: Device.
+ * @input: Pointer holding the input interface on success.
+ * Should be ignored on failure.
+ * @output: Pointer holding the output interface on success.
+ * Should be ignored on failure.
+ * @input_fw_va: Pointer holding the input interface FW VA on success.
+ * Should be ignored on failure.
+ * @output_fw_va: Pointer holding the output interface FW VA on success.
+ * Should be ignored on failure.
+ *
+ * Allocates panthor_fw_ringbuf_{input,output}_iface interfaces. The input
+ * interface is at offset 0, and the output interface at offset 4096.
+ *
+ * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
+ */
+struct panthor_kernel_bo *
+panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
+ struct panthor_fw_ringbuf_input_iface **input,
+ const struct panthor_fw_ringbuf_output_iface **output,
+ u32 *input_fw_va, u32 *output_fw_va)
+{
+ struct panthor_kernel_bo *mem;
+ int ret;
+
+ mem = panthor_kernel_bo_create(ptdev, ptdev->fw->vm, SZ_8K,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(mem))
+ return mem;
+
+ ret = panthor_kernel_bo_vmap(mem);
+ if (ret) {
+ panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), mem);
+ return ERR_PTR(ret);
+ }
+
+ memset(mem->kmap, 0, panthor_kernel_bo_size(mem));
+ *input = mem->kmap;
+ *output = mem->kmap + SZ_4K;
+ *input_fw_va = panthor_kernel_bo_gpuva(mem);
+ *output_fw_va = *input_fw_va + SZ_4K;
+
+ return mem;
+}
+
+/**
+ * panthor_fw_alloc_suspend_buf_mem() - Allocate a suspend buffer for a command stream group.
+ * @ptdev: Device.
+ * @size: Size of the suspend buffer.
+ *
+ * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
+ */
+struct panthor_kernel_bo *
+panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size)
+{
+ return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+}
+
+static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
+ const struct firmware *fw,
+ struct panthor_fw_binary_iter *iter,
+ u32 ehdr)
+{
+ struct panthor_fw_binary_section_entry_hdr hdr;
+ struct panthor_fw_section *section;
+ u32 section_size;
+ u32 name_len;
+ int ret;
+
+ ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
+ if (ret)
+ return ret;
+
+ if (hdr.data.end < hdr.data.start) {
+ drm_err(&ptdev->base, "Firmware corrupted, data.end < data.start (0x%x < 0x%x)\n",
+ hdr.data.end, hdr.data.start);
+ return -EINVAL;
+ }
+
+ if (hdr.va.end < hdr.va.start) {
+ drm_err(&ptdev->base, "Firmware corrupted, hdr.va.end < hdr.va.start (0x%x < 0x%x)\n",
+ hdr.va.end, hdr.va.start);
+ return -EINVAL;
+ }
+
+ if (hdr.data.end > fw->size) {
+ drm_err(&ptdev->base, "Firmware corrupted, file truncated? data_end=0x%x > fw size=0x%zx\n",
+ hdr.data.end, fw->size);
+ return -EINVAL;
+ }
+
+ if ((hdr.va.start & ~PAGE_MASK) != 0 ||
+ (hdr.va.end & ~PAGE_MASK) != 0) {
+ drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
+ hdr.va.start, hdr.va.end);
+ return -EINVAL;
+ }
+
+ if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS) {
+ drm_err(&ptdev->base, "Firmware contains interface with unsupported flags (0x%x)\n",
+ hdr.flags);
+ return -EINVAL;
+ }
+
+ if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_PROT) {
+ drm_warn(&ptdev->base,
+			 "Firmware protected mode entry not supported, ignoring");
+ return 0;
+ }
+
+ if (hdr.va.start == CSF_MCU_SHARED_REGION_START &&
+ !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED)) {
+		drm_err(&ptdev->base,
+			"Interface at 0x%llx must be shared\n", CSF_MCU_SHARED_REGION_START);
+ return -EINVAL;
+ }
+
+ name_len = iter->size - iter->offset;
+
+ section = drmm_kzalloc(&ptdev->base, sizeof(*section), GFP_KERNEL);
+ if (!section)
+ return -ENOMEM;
+
+ list_add_tail(&section->node, &ptdev->fw->sections);
+ section->flags = hdr.flags;
+ section->data.size = hdr.data.end - hdr.data.start;
+
+ if (section->data.size > 0) {
+ void *data = drmm_kmalloc(&ptdev->base, section->data.size, GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(data, fw->data + hdr.data.start, section->data.size);
+ section->data.buf = data;
+ }
+
+ if (name_len > 0) {
+ char *name = drmm_kmalloc(&ptdev->base, name_len + 1, GFP_KERNEL);
+
+ if (!name)
+ return -ENOMEM;
+
+ memcpy(name, iter->data + iter->offset, name_len);
+ name[name_len] = '\0';
+ section->name = name;
+ }
+
+ section_size = hdr.va.end - hdr.va.start;
+ if (section_size) {
+ u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK;
+ struct panthor_gem_object *bo;
+ u32 vm_map_flags = 0;
+ struct sg_table *sgt;
+ u64 va = hdr.va.start;
+
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_READONLY;
+
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_EX))
+ vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC;
+
+ /* TODO: CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_*_COHERENT are mapped to
+ * non-cacheable for now. We might want to introduce a new
+ * IOMMU_xxx flag (or abuse IOMMU_MMIO, which maps to device
+ * memory and is currently not used by our driver) for
+ * AS_MEMATTR_AARCH64_SHARED memory, so we can take benefit
+ * of IO-coherent systems.
+ */
+ if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED)
+ vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED;
+
+ section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
+ section_size,
+ DRM_PANTHOR_BO_NO_MMAP,
+ vm_map_flags, va);
+ if (IS_ERR(section->mem))
+ return PTR_ERR(section->mem);
+
+ if (drm_WARN_ON(&ptdev->base, section->mem->va_node.start != hdr.va.start))
+ return -EINVAL;
+
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED) {
+ ret = panthor_kernel_bo_vmap(section->mem);
+ if (ret)
+ return ret;
+ }
+
+ panthor_fw_init_section_mem(ptdev, section);
+
+ bo = to_panthor_bo(section->mem->obj);
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ dma_sync_sgtable_for_device(ptdev->base.dev, sgt, DMA_TO_DEVICE);
+ }
+
+ if (hdr.va.start == CSF_MCU_SHARED_REGION_START)
+ ptdev->fw->shared_section = section;
+
+ return 0;
+}
+
+static void
+panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload)
+{
+ struct panthor_fw_section *section;
+
+ list_for_each_entry(section, &ptdev->fw->sections, node) {
+ struct sg_table *sgt;
+
+ if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ continue;
+
+ panthor_fw_init_section_mem(ptdev, section);
+ sgt = drm_gem_shmem_get_pages_sgt(&to_panthor_bo(section->mem->obj)->base);
+ if (!drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(sgt)))
+ dma_sync_sgtable_for_device(ptdev->base.dev, sgt, DMA_TO_DEVICE);
+ }
+}
+
+static int panthor_fw_load_entry(struct panthor_device *ptdev,
+ const struct firmware *fw,
+ struct panthor_fw_binary_iter *iter)
+{
+ struct panthor_fw_binary_iter eiter;
+ u32 ehdr;
+ int ret;
+
+ ret = panthor_fw_binary_iter_read(ptdev, iter, &ehdr, sizeof(ehdr));
+ if (ret)
+ return ret;
+
+ if ((iter->offset % sizeof(u32)) ||
+ (CSF_FW_BINARY_ENTRY_SIZE(ehdr) % sizeof(u32))) {
+ drm_err(&ptdev->base, "Firmware entry isn't 32 bit aligned, offset=0x%x size=0x%x\n",
+ (u32)(iter->offset - sizeof(u32)), CSF_FW_BINARY_ENTRY_SIZE(ehdr));
+ return -EINVAL;
+ }
+
+ if (panthor_fw_binary_sub_iter_init(ptdev, iter, &eiter,
+ CSF_FW_BINARY_ENTRY_SIZE(ehdr) - sizeof(ehdr)))
+ return -EINVAL;
+
+ switch (CSF_FW_BINARY_ENTRY_TYPE(ehdr)) {
+ case CSF_FW_BINARY_ENTRY_TYPE_IFACE:
+ return panthor_fw_load_section_entry(ptdev, fw, &eiter, ehdr);
+
+ /* FIXME: handle those entry types? */
+ case CSF_FW_BINARY_ENTRY_TYPE_CONFIG:
+ case CSF_FW_BINARY_ENTRY_TYPE_FUTF_TEST:
+ case CSF_FW_BINARY_ENTRY_TYPE_TRACE_BUFFER:
+ case CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA:
+ return 0;
+ default:
+ break;
+ }
+
+ if (ehdr & CSF_FW_BINARY_ENTRY_OPTIONAL)
+ return 0;
+
+ drm_err(&ptdev->base,
+ "Unsupported non-optional entry type %u in firmware\n",
+ CSF_FW_BINARY_ENTRY_TYPE(ehdr));
+ return -EINVAL;
+}
+
+static int panthor_fw_load(struct panthor_device *ptdev)
+{
+ const struct firmware *fw = NULL;
+ struct panthor_fw_binary_iter iter = {};
+ struct panthor_fw_binary_hdr hdr;
+ char fw_path[128];
+ int ret;
+
+ snprintf(fw_path, sizeof(fw_path), "arm/mali/arch%d.%d/%s",
+ (u32)GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id),
+ (u32)GPU_ARCH_MINOR(ptdev->gpu_info.gpu_id),
+ CSF_FW_NAME);
+
+ ret = request_firmware(&fw, fw_path, ptdev->base.dev);
+ if (ret) {
+ drm_err(&ptdev->base, "Failed to load firmware image '%s'\n",
+ CSF_FW_NAME);
+ return ret;
+ }
+
+ iter.data = fw->data;
+ iter.size = fw->size;
+ ret = panthor_fw_binary_iter_read(ptdev, &iter, &hdr, sizeof(hdr));
+ if (ret)
+ goto out;
+
+ if (hdr.magic != CSF_FW_BINARY_HEADER_MAGIC) {
+ ret = -EINVAL;
+ drm_err(&ptdev->base, "Invalid firmware magic\n");
+ goto out;
+ }
+
+ if (hdr.major != CSF_FW_BINARY_HEADER_MAJOR_MAX) {
+ ret = -EINVAL;
+ drm_err(&ptdev->base, "Unsupported firmware binary header version %d.%d (expected %d.x)\n",
+ hdr.major, hdr.minor, CSF_FW_BINARY_HEADER_MAJOR_MAX);
+ goto out;
+ }
+
+	if (hdr.size > iter.size) {
+		drm_err(&ptdev->base, "Firmware image is truncated\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+ iter.size = hdr.size;
+
+ while (iter.offset < hdr.size) {
+ ret = panthor_fw_load_entry(ptdev, fw, &iter);
+ if (ret)
+ goto out;
+ }
+
+ if (!ptdev->fw->shared_section) {
+ drm_err(&ptdev->base, "Shared interface region not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+/**
+ * iface_fw_to_cpu_addr() - Turn an MCU address into a CPU address
+ * @ptdev: Device.
+ * @mcu_va: MCU address.
+ *
+ * Return: NULL if the address is not part of the shared section, non-NULL otherwise.
+ */
+static void *iface_fw_to_cpu_addr(struct panthor_device *ptdev, u32 mcu_va)
+{
+ u64 shared_mem_start = panthor_kernel_bo_gpuva(ptdev->fw->shared_section->mem);
+ u64 shared_mem_end = shared_mem_start +
+ panthor_kernel_bo_size(ptdev->fw->shared_section->mem);
+ if (mcu_va < shared_mem_start || mcu_va >= shared_mem_end)
+ return NULL;
+
+ return ptdev->fw->shared_section->mem->kmap + (mcu_va - shared_mem_start);
+}
+
+static int panthor_init_cs_iface(struct panthor_device *ptdev,
+ unsigned int csg_idx, unsigned int cs_idx)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, csg_idx);
+ struct panthor_fw_cs_iface *cs_iface = &ptdev->fw->iface.streams[csg_idx][cs_idx];
+ u64 shared_section_sz = panthor_kernel_bo_size(ptdev->fw->shared_section->mem);
+ u32 iface_offset = CSF_GROUP_CONTROL_OFFSET +
+ (csg_idx * glb_iface->control->group_stride) +
+ CSF_STREAM_CONTROL_OFFSET +
+ (cs_idx * csg_iface->control->stream_stride);
+ struct panthor_fw_cs_iface *first_cs_iface =
+ panthor_fw_get_cs_iface(ptdev, 0, 0);
+
+ if (iface_offset + sizeof(*cs_iface) >= shared_section_sz)
+ return -EINVAL;
+
+ spin_lock_init(&cs_iface->lock);
+ cs_iface->control = ptdev->fw->shared_section->mem->kmap + iface_offset;
+ cs_iface->input = iface_fw_to_cpu_addr(ptdev, cs_iface->control->input_va);
+ cs_iface->output = iface_fw_to_cpu_addr(ptdev, cs_iface->control->output_va);
+
+ if (!cs_iface->input || !cs_iface->output) {
+ drm_err(&ptdev->base, "Invalid stream control interface input/output VA");
+ return -EINVAL;
+ }
+
+ if (cs_iface != first_cs_iface) {
+ if (cs_iface->control->features != first_cs_iface->control->features) {
+ drm_err(&ptdev->base, "Expecting identical CS slots");
+ return -EINVAL;
+ }
+ } else {
+ u32 reg_count = CS_FEATURES_WORK_REGS(cs_iface->control->features);
+
+ ptdev->csif_info.cs_reg_count = reg_count;
+ ptdev->csif_info.unpreserved_cs_reg_count = CSF_UNPRESERVED_REG_COUNT;
+ }
+
+ return 0;
+}
+
+static bool compare_csg(const struct panthor_fw_csg_control_iface *a,
+ const struct panthor_fw_csg_control_iface *b)
+{
+ if (a->features != b->features)
+ return false;
+ if (a->suspend_size != b->suspend_size)
+ return false;
+ if (a->protm_suspend_size != b->protm_suspend_size)
+ return false;
+ if (a->stream_num != b->stream_num)
+ return false;
+ return true;
+}
+
+static int panthor_init_csg_iface(struct panthor_device *ptdev,
+ unsigned int csg_idx)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ struct panthor_fw_csg_iface *csg_iface = &ptdev->fw->iface.groups[csg_idx];
+ u64 shared_section_sz = panthor_kernel_bo_size(ptdev->fw->shared_section->mem);
+ u32 iface_offset = CSF_GROUP_CONTROL_OFFSET + (csg_idx * glb_iface->control->group_stride);
+ unsigned int i;
+
+ if (iface_offset + sizeof(*csg_iface) >= shared_section_sz)
+ return -EINVAL;
+
+ spin_lock_init(&csg_iface->lock);
+ csg_iface->control = ptdev->fw->shared_section->mem->kmap + iface_offset;
+ csg_iface->input = iface_fw_to_cpu_addr(ptdev, csg_iface->control->input_va);
+ csg_iface->output = iface_fw_to_cpu_addr(ptdev, csg_iface->control->output_va);
+
+ if (csg_iface->control->stream_num < MIN_CS_PER_CSG ||
+ csg_iface->control->stream_num > MAX_CS_PER_CSG)
+ return -EINVAL;
+
+ if (!csg_iface->input || !csg_iface->output) {
+ drm_err(&ptdev->base, "Invalid group control interface input/output VA");
+ return -EINVAL;
+ }
+
+ if (csg_idx > 0) {
+ struct panthor_fw_csg_iface *first_csg_iface =
+ panthor_fw_get_csg_iface(ptdev, 0);
+
+ if (!compare_csg(first_csg_iface->control, csg_iface->control)) {
+ drm_err(&ptdev->base, "Expecting identical CSG slots");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < csg_iface->control->stream_num; i++) {
+ int ret = panthor_init_cs_iface(ptdev, csg_idx, i);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static u32 panthor_get_instr_features(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ if (glb_iface->control->version < CSF_IFACE_VERSION(1, 1, 0))
+ return 0;
+
+ return glb_iface->control->instr_features;
+}
+
+static int panthor_fw_init_ifaces(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = &ptdev->fw->iface.global;
+ unsigned int i;
+
+ if (!ptdev->fw->shared_section->mem->kmap)
+ return -EINVAL;
+
+ spin_lock_init(&glb_iface->lock);
+ glb_iface->control = ptdev->fw->shared_section->mem->kmap;
+
+ if (!glb_iface->control->version) {
+ drm_err(&ptdev->base, "Firmware version is 0. Firmware may have failed to boot");
+ return -EINVAL;
+ }
+
+ glb_iface->input = iface_fw_to_cpu_addr(ptdev, glb_iface->control->input_va);
+ glb_iface->output = iface_fw_to_cpu_addr(ptdev, glb_iface->control->output_va);
+ if (!glb_iface->input || !glb_iface->output) {
+ drm_err(&ptdev->base, "Invalid global control interface input/output VA");
+ return -EINVAL;
+ }
+
+ if (glb_iface->control->group_num > MAX_CSGS ||
+ glb_iface->control->group_num < MIN_CSGS) {
+ drm_err(&ptdev->base, "Invalid number of control groups");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < glb_iface->control->group_num; i++) {
+ int ret = panthor_init_csg_iface(ptdev, i);
+
+ if (ret)
+ return ret;
+ }
+
+ drm_info(&ptdev->base, "CSF FW v%d.%d.%d, Features %#x Instrumentation features %#x",
+ CSF_IFACE_VERSION_MAJOR(glb_iface->control->version),
+ CSF_IFACE_VERSION_MINOR(glb_iface->control->version),
+ CSF_IFACE_VERSION_PATCH(glb_iface->control->version),
+ glb_iface->control->features,
+ panthor_get_instr_features(ptdev));
+ return 0;
+}
+
+static void panthor_fw_init_global_iface(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ /* Enable all cores. */
+ glb_iface->input->core_en_mask = ptdev->gpu_info.shader_present;
+
+ /* Setup timers. */
+ glb_iface->input->poweroff_timer = panthor_fw_conv_timeout(ptdev, PWROFF_HYSTERESIS_US);
+ glb_iface->input->progress_timer = PROGRESS_TIMEOUT_CYCLES >> PROGRESS_TIMEOUT_SCALE_SHIFT;
+ glb_iface->input->idle_timer = panthor_fw_conv_timeout(ptdev, IDLE_HYSTERESIS_US);
+
+ /* Enable interrupts we care about. */
+ glb_iface->input->ack_irq_mask = GLB_CFG_ALLOC_EN |
+ GLB_PING |
+ GLB_CFG_PROGRESS_TIMER |
+ GLB_CFG_POWEROFF_TIMER |
+ GLB_IDLE_EN |
+ GLB_IDLE;
+
+ panthor_fw_update_reqs(glb_iface, req, GLB_IDLE_EN, GLB_IDLE_EN);
+ panthor_fw_toggle_reqs(glb_iface, req, ack,
+ GLB_CFG_ALLOC_EN |
+ GLB_CFG_POWEROFF_TIMER |
+ GLB_CFG_PROGRESS_TIMER);
+
+ gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+
+ /* Kick the watchdog. */
+ mod_delayed_work(ptdev->reset.wq, &ptdev->fw->watchdog.ping_work,
+ msecs_to_jiffies(PING_INTERVAL_MS));
+}
+
+static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status)
+{
+ if (!ptdev->fw->booted && (status & JOB_INT_GLOBAL_IF))
+ ptdev->fw->booted = true;
+
+ wake_up_all(&ptdev->fw->req_waitqueue);
+
+ /* If the FW is not booted, don't process IRQs, just flag the FW as booted. */
+ if (!ptdev->fw->booted)
+ return;
+
+ panthor_sched_report_fw_events(ptdev, status);
+}
+PANTHOR_IRQ_HANDLER(job, JOB, panthor_job_irq_handler);
+
+static int panthor_fw_start(struct panthor_device *ptdev)
+{
+ bool timedout = false;
+
+ ptdev->fw->booted = false;
+ panthor_job_irq_resume(&ptdev->fw->irq, ~0);
+ gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_AUTO);
+
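+ /* Wait for the FW to signal the global interface. If the wait times
+ * out, re-check the raw IRQ status in case the interrupt was lost.
+ */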
+ if (!wait_event_timeout(ptdev->fw->req_waitqueue,
+ ptdev->fw->booted,
+ msecs_to_jiffies(1000))) {
+ if (!ptdev->fw->booted &&
+ !(gpu_read(ptdev, JOB_INT_STAT) & JOB_INT_GLOBAL_IF))
+ timedout = true;
+ }
+
+ if (timedout) {
+ static const char * const status_str[] = {
+ [MCU_STATUS_DISABLED] = "disabled",
+ [MCU_STATUS_ENABLED] = "enabled",
+ [MCU_STATUS_HALT] = "halt",
+ [MCU_STATUS_FATAL] = "fatal",
+ };
+ u32 status = gpu_read(ptdev, MCU_STATUS);
+
+ drm_err(&ptdev->base, "Failed to boot MCU (status=%s)",
+ status < ARRAY_SIZE(status_str) ? status_str[status] : "unknown");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void panthor_fw_stop(struct panthor_device *ptdev)
+{
+ u32 status;
+
+ gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_DISABLE);
+ if (readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
+ status == MCU_STATUS_DISABLED, 10, 100000))
+ drm_err(&ptdev->base, "Failed to stop MCU");
+}
+
+/**
+ * panthor_fw_pre_reset() - Call before a reset.
+ * @ptdev: Device.
+ * @on_hang: true if the reset was triggered on a GPU hang.
+ *
+ * If the reset is not triggered on a hang, we try to gracefully halt the
+ * MCU, so we can do a fast-reset when panthor_fw_post_reset() is called.
+ */
+void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
+{
+ /* Make sure we won't be woken up by a ping. */
+ cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
+
+ ptdev->fw->fast_reset = false;
+
+ if (!on_hang) {
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ u32 status;
+
+ panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
+ gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+ if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
+ status == MCU_STATUS_HALT, 10, 100000) &&
+ glb_iface->output->halt_status == PANTHOR_FW_HALT_OK) {
+ ptdev->fw->fast_reset = true;
+ } else {
+ drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
+ }
+
+ /* The FW detects 0 -> 1 transitions. Make sure we reset
+ * the HALT bit before the FW is rebooted.
+ */
+ panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
+ }
+
+ panthor_job_irq_suspend(&ptdev->fw->irq);
+}
+
+/**
+ * panthor_fw_post_reset() - Call after a reset.
+ * @ptdev: Device.
+ *
+ * Start the FW. If this is not a fast reset, all FW sections are reloaded to
+ * make sure we can recover from a memory corruption.
+ */
+int panthor_fw_post_reset(struct panthor_device *ptdev)
+{
+ int ret;
+
+ /* Make the MCU VM active. */
+ ret = panthor_vm_active(ptdev->fw->vm);
+ if (ret)
+ return ret;
+
+ /* If this is a fast reset, try to start the MCU without reloading
+ * the FW sections. If it fails, go for a full reset.
+ */
+ if (ptdev->fw->fast_reset) {
+ ret = panthor_fw_start(ptdev);
+ if (!ret)
+ goto out;
+
+ /* Force a disable, so we get a fresh boot on the next
+ * panthor_fw_start() call.
+ */
+ gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_DISABLE);
+ drm_err(&ptdev->base, "FW fast reset failed, trying a slow reset");
+ }
+
+ /* Reload all sections, including RO ones. We're not supposed
+ * to end up here anyway, let's just assume the overhead of
+ * reloading everything is acceptable.
+ */
+ panthor_reload_fw_sections(ptdev, true);
+
+ ret = panthor_fw_start(ptdev);
+ if (ret) {
+ drm_err(&ptdev->base, "FW slow reset failed");
+ return ret;
+ }
+
+out:
+ /* We must re-initialize the global interface even on fast-reset. */
+ panthor_fw_init_global_iface(ptdev);
+ return 0;
+}
+
+/**
+ * panthor_fw_unplug() - Called when the device is unplugged.
+ * @ptdev: Device.
+ *
+ * This function must make sure all pending operations are flushed before
+ * the device resources are released, thus preventing any interaction with
+ * the HW.
+ *
+ * Any FW-related work still running after this function returns must use
+ * drm_dev_{enter,exit}() and skip any HW access when drm_dev_enter()
+ * returns false.
+ */
+void panthor_fw_unplug(struct panthor_device *ptdev)
+{
+ struct panthor_fw_section *section;
+
+ cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
+
+ /* Make sure the IRQ handler cannot be called after that point. */
+ if (ptdev->fw->irq.irq)
+ panthor_job_irq_suspend(&ptdev->fw->irq);
+
+ panthor_fw_stop(ptdev);
+
+ list_for_each_entry(section, &ptdev->fw->sections, node)
+ panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), section->mem);
+
+ /* We intentionally don't call panthor_vm_idle() and let
+ * panthor_mmu_unplug() release the AS we acquired with
+ * panthor_vm_active() so we don't have to track the VM active/idle
+ * state to keep the active_refcnt balanced.
+ */
+ panthor_vm_put(ptdev->fw->vm);
+
+ panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
+}
+
+/**
+ * panthor_fw_wait_acks() - Wait for requests to be acknowledged by the FW.
+ * @req_ptr: Pointer to the req register.
+ * @ack_ptr: Pointer to the ack register.
+ * @wq: Wait queue to use for the sleeping wait.
+ * @req_mask: Mask of requests to wait for.
+ * @acked: Pointer to field that's updated with the acked requests.
+ * If the function returns 0, *acked == req_mask.
+ * @timeout_ms: Timeout expressed in milliseconds.
+ *
+ * Return: 0 on success, -ETIMEDOUT otherwise.
+ */
+static int panthor_fw_wait_acks(const u32 *req_ptr, const u32 *ack_ptr,
+ wait_queue_head_t *wq,
+ u32 req_mask, u32 *acked,
+ u32 timeout_ms)
+{
+ u32 ack, req = READ_ONCE(*req_ptr) & req_mask;
+ int ret;
+
+ /* Busy wait for a few µsecs before falling back to a sleeping wait. */
+ *acked = req_mask;
+ ret = read_poll_timeout_atomic(READ_ONCE, ack,
+ (ack & req_mask) == req,
+ 0, 10, 0,
+ *ack_ptr);
+ if (!ret)
+ return 0;
+
+ if (wait_event_timeout(*wq, (READ_ONCE(*ack_ptr) & req_mask) == req,
+ msecs_to_jiffies(timeout_ms)))
+ return 0;
+
+ /* Check one last time, in case we were not woken up for some reason. */
+ ack = READ_ONCE(*ack_ptr);
+ if ((ack & req_mask) == req)
+ return 0;
+
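+ /* Requests whose req and ack bits already match have been acked. */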
+ *acked = ~(req ^ ack) & req_mask;
+ return -ETIMEDOUT;
+}
+
+/**
+ * panthor_fw_glb_wait_acks() - Wait for global requests to be acknowledged.
+ * @ptdev: Device.
+ * @req_mask: Mask of requests to wait for.
+ * @acked: Pointer to field that's updated with the acked requests.
+ * If the function returns 0, *acked == req_mask.
+ * @timeout_ms: Timeout expressed in milliseconds.
+ *
+ * Return: 0 on success, -ETIMEDOUT otherwise.
+ */
+int panthor_fw_glb_wait_acks(struct panthor_device *ptdev,
+ u32 req_mask, u32 *acked,
+ u32 timeout_ms)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ /* GLB_HALT doesn't get acked through the FW interface. */
+ if (drm_WARN_ON(&ptdev->base, req_mask & (~GLB_REQ_MASK | GLB_HALT)))
+ return -EINVAL;
+
+ return panthor_fw_wait_acks(&glb_iface->input->req,
+ &glb_iface->output->ack,
+ &ptdev->fw->req_waitqueue,
+ req_mask, acked, timeout_ms);
+}
+
+/**
+ * panthor_fw_csg_wait_acks() - Wait for command stream group requests to be acknowledged.
+ * @ptdev: Device.
+ * @csg_slot: CSG slot ID.
+ * @req_mask: Mask of requests to wait for.
+ * @acked: Pointer to field that's updated with the acked requests.
+ * If the function returns 0, *acked == req_mask.
+ * @timeout_ms: Timeout expressed in milliseconds.
+ *
+ * Return: 0 on success, -ETIMEDOUT otherwise.
+ */
+int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_slot,
+ u32 req_mask, u32 *acked, u32 timeout_ms)
+{
+ struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, csg_slot);
+ int ret;
+
+ if (drm_WARN_ON(&ptdev->base, req_mask & ~CSG_REQ_MASK))
+ return -EINVAL;
+
+ ret = panthor_fw_wait_acks(&csg_iface->input->req,
+ &csg_iface->output->ack,
+ &ptdev->fw->req_waitqueue,
+ req_mask, acked, timeout_ms);
+
+ /*
+ * Check that all bits in the state field were updated; if any of them
+ * mismatch, clear the whole state field. This allows code to do
+ * (acked & CSG_STATE_MASK) and get the right value.
+ */
+ if ((*acked & CSG_STATE_MASK) != CSG_STATE_MASK)
+ *acked &= ~CSG_STATE_MASK;
+
+ return ret;
+}
+
+/**
+ * panthor_fw_ring_csg_doorbells() - Ring command stream group doorbells.
+ * @ptdev: Device.
+ * @csg_mask: Bitmask encoding the command stream group doorbells to ring.
+ *
+ * This function toggles bits in doorbell_req and rings the global doorbell.
+ * It doesn't require a user doorbell to be attached to the group.
+ */
+void panthor_fw_ring_csg_doorbells(struct panthor_device *ptdev, u32 csg_mask)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ panthor_fw_toggle_reqs(glb_iface, doorbell_req, doorbell_ack, csg_mask);
+ gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+}
+
+static void panthor_fw_ping_work(struct work_struct *work)
+{
+ struct panthor_fw *fw = container_of(work, struct panthor_fw, watchdog.ping_work.work);
+ struct panthor_device *ptdev = fw->irq.ptdev;
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ u32 acked;
+ int ret;
+
+ if (panthor_device_reset_is_pending(ptdev))
+ return;
+
+ panthor_fw_toggle_reqs(glb_iface, req, ack, GLB_PING);
+ gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+
+ ret = panthor_fw_glb_wait_acks(ptdev, GLB_PING, &acked, 100);
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ drm_err(&ptdev->base, "FW ping timeout, scheduling a reset");
+ } else {
+ mod_delayed_work(ptdev->reset.wq, &fw->watchdog.ping_work,
+ msecs_to_jiffies(PING_INTERVAL_MS));
+ }
+}
+
+/**
+ * panthor_fw_init() - Initialize FW related data.
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_fw_init(struct panthor_device *ptdev)
+{
+ struct panthor_fw *fw;
+ int ret, irq;
+
+ fw = drmm_kzalloc(&ptdev->base, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ ptdev->fw = fw;
+ init_waitqueue_head(&fw->req_waitqueue);
+ INIT_LIST_HEAD(&fw->sections);
+ INIT_DELAYED_WORK(&fw->watchdog.ping_work, panthor_fw_ping_work);
+
+ irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "job");
+ if (irq <= 0)
+ return -ENODEV;
+
+ ret = panthor_request_job_irq(ptdev, &fw->irq, irq, 0);
+ if (ret) {
+ drm_err(&ptdev->base, "failed to request job irq");
+ return ret;
+ }
+
+ ret = panthor_gpu_l2_power_on(ptdev);
+ if (ret)
+ return ret;
+
+ fw->vm = panthor_vm_create(ptdev, true,
+ 0, SZ_4G,
+ CSF_MCU_SHARED_REGION_START,
+ CSF_MCU_SHARED_REGION_SIZE);
+ if (IS_ERR(fw->vm)) {
+ ret = PTR_ERR(fw->vm);
+ fw->vm = NULL;
+ goto err_unplug_fw;
+ }
+
+ ret = panthor_fw_load(ptdev);
+ if (ret)
+ goto err_unplug_fw;
+
+ ret = panthor_vm_active(fw->vm);
+ if (ret)
+ goto err_unplug_fw;
+
+ ret = panthor_fw_start(ptdev);
+ if (ret)
+ goto err_unplug_fw;
+
+ ret = panthor_fw_init_ifaces(ptdev);
+ if (ret)
+ goto err_unplug_fw;
+
+ panthor_fw_init_global_iface(ptdev);
+ return 0;
+
+err_unplug_fw:
+ panthor_fw_unplug(ptdev);
+ return ret;
+}
+
+MODULE_FIRMWARE("arm/mali/arch10.8/mali_csffw.bin");
diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h
new file mode 100644
index 000000000000..22448abde992
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_fw.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_MCU_H__
+#define __PANTHOR_MCU_H__
+
+#include <linux/types.h>
+
+struct panthor_device;
+struct panthor_kernel_bo;
+
+#define MAX_CSGS 31
+#define MAX_CS_PER_CSG 32
+
+struct panthor_fw_ringbuf_input_iface {
+ u64 insert;
+ u64 extract;
+};
+
+struct panthor_fw_ringbuf_output_iface {
+ u64 extract;
+ u32 active;
+};
+
+struct panthor_fw_cs_control_iface {
+#define CS_FEATURES_WORK_REGS(x) (((x) & GENMASK(7, 0)) + 1)
+#define CS_FEATURES_SCOREBOARDS(x) (((x) & GENMASK(15, 8)) >> 8)
+#define CS_FEATURES_COMPUTE BIT(16)
+#define CS_FEATURES_FRAGMENT BIT(17)
+#define CS_FEATURES_TILER BIT(18)
+ u32 features;
+ u32 input_va;
+ u32 output_va;
+};
+
+struct panthor_fw_cs_input_iface {
+#define CS_STATE_MASK GENMASK(2, 0)
+#define CS_STATE_STOP 0
+#define CS_STATE_START 1
+#define CS_EXTRACT_EVENT BIT(4)
+#define CS_IDLE_SYNC_WAIT BIT(8)
+#define CS_IDLE_PROTM_PENDING BIT(9)
+#define CS_IDLE_EMPTY BIT(10)
+#define CS_IDLE_RESOURCE_REQ BIT(11)
+#define CS_TILER_OOM BIT(26)
+#define CS_PROTM_PENDING BIT(27)
+#define CS_FATAL BIT(30)
+#define CS_FAULT BIT(31)
+#define CS_REQ_MASK (CS_STATE_MASK | \
+ CS_EXTRACT_EVENT | \
+ CS_IDLE_SYNC_WAIT | \
+ CS_IDLE_PROTM_PENDING | \
+ CS_IDLE_EMPTY | \
+ CS_IDLE_RESOURCE_REQ)
+#define CS_EVT_MASK (CS_TILER_OOM | \
+ CS_PROTM_PENDING | \
+ CS_FATAL | \
+ CS_FAULT)
+ u32 req;
+
+#define CS_CONFIG_PRIORITY(x) ((x) & GENMASK(3, 0))
+#define CS_CONFIG_DOORBELL(x) (((x) << 8) & GENMASK(15, 8))
+ u32 config;
+ u32 reserved1;
+ u32 ack_irq_mask;
+ u64 ringbuf_base;
+ u32 ringbuf_size;
+ u32 reserved2;
+ u64 heap_start;
+ u64 heap_end;
+ u64 ringbuf_input;
+ u64 ringbuf_output;
+ u32 instr_config;
+ u32 instrbuf_size;
+ u64 instrbuf_base;
+ u64 instrbuf_offset_ptr;
+};
+
+struct panthor_fw_cs_output_iface {
+ u32 ack;
+ u32 reserved1[15];
+ u64 status_cmd_ptr;
+
+#define CS_STATUS_WAIT_SB_MASK GENMASK(15, 0)
+#define CS_STATUS_WAIT_SB_SRC_MASK GENMASK(19, 16)
+#define CS_STATUS_WAIT_SB_SRC_NONE (0 << 16)
+#define CS_STATUS_WAIT_SB_SRC_WAIT (8 << 16)
+#define CS_STATUS_WAIT_SYNC_COND_LE (0 << 24)
+#define CS_STATUS_WAIT_SYNC_COND_GT (1 << 24)
+#define CS_STATUS_WAIT_SYNC_COND_MASK GENMASK(27, 24)
+#define CS_STATUS_WAIT_PROGRESS BIT(28)
+#define CS_STATUS_WAIT_PROTM BIT(29)
+#define CS_STATUS_WAIT_SYNC_64B BIT(30)
+#define CS_STATUS_WAIT_SYNC BIT(31)
+ u32 status_wait;
+ u32 status_req_resource;
+ u64 status_wait_sync_ptr;
+ u32 status_wait_sync_value;
+ u32 status_scoreboards;
+
+#define CS_STATUS_BLOCKED_REASON_UNBLOCKED 0
+#define CS_STATUS_BLOCKED_REASON_SB_WAIT 1
+#define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT 2
+#define CS_STATUS_BLOCKED_REASON_SYNC_WAIT 3
+#define CS_STATUS_BLOCKED_REASON_DEFERRED 5
+#define CS_STATUS_BLOCKED_REASON_RES 6
+#define CS_STATUS_BLOCKED_REASON_FLUSH 7
+#define CS_STATUS_BLOCKED_REASON_MASK GENMASK(3, 0)
+ u32 status_blocked_reason;
+ u32 status_wait_sync_value_hi;
+ u32 reserved2[6];
+
+#define CS_EXCEPTION_TYPE(x) ((x) & GENMASK(7, 0))
+#define CS_EXCEPTION_DATA(x) (((x) >> 8) & GENMASK(23, 0))
+ u32 fault;
+ u32 fatal;
+ u64 fault_info;
+ u64 fatal_info;
+ u32 reserved3[10];
+ u32 heap_vt_start;
+ u32 heap_vt_end;
+ u32 reserved4;
+ u32 heap_frag_end;
+ u64 heap_address;
+};
+
+struct panthor_fw_csg_control_iface {
+ u32 features;
+ u32 input_va;
+ u32 output_va;
+ u32 suspend_size;
+ u32 protm_suspend_size;
+ u32 stream_num;
+ u32 stream_stride;
+};
+
+struct panthor_fw_csg_input_iface {
+#define CSG_STATE_MASK GENMASK(2, 0)
+#define CSG_STATE_TERMINATE 0
+#define CSG_STATE_START 1
+#define CSG_STATE_SUSPEND 2
+#define CSG_STATE_RESUME 3
+#define CSG_ENDPOINT_CONFIG BIT(4)
+#define CSG_STATUS_UPDATE BIT(5)
+#define CSG_SYNC_UPDATE BIT(28)
+#define CSG_IDLE BIT(29)
+#define CSG_DOORBELL BIT(30)
+#define CSG_PROGRESS_TIMER_EVENT BIT(31)
+#define CSG_REQ_MASK (CSG_STATE_MASK | \
+ CSG_ENDPOINT_CONFIG | \
+ CSG_STATUS_UPDATE)
+#define CSG_EVT_MASK (CSG_SYNC_UPDATE | \
+ CSG_IDLE | \
+ CSG_PROGRESS_TIMER_EVENT)
+ u32 req;
+ u32 ack_irq_mask;
+
+ u32 doorbell_req;
+ u32 cs_irq_ack;
+ u32 reserved1[4];
+ u64 allow_compute;
+ u64 allow_fragment;
+ u32 allow_other;
+
+#define CSG_EP_REQ_COMPUTE(x) ((x) & GENMASK(7, 0))
+#define CSG_EP_REQ_FRAGMENT(x) (((x) << 8) & GENMASK(15, 8))
+#define CSG_EP_REQ_TILER(x) (((x) << 16) & GENMASK(19, 16))
+#define CSG_EP_REQ_EXCL_COMPUTE BIT(20)
+#define CSG_EP_REQ_EXCL_FRAGMENT BIT(21)
+#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & GENMASK(31, 28))
+#define CSG_EP_REQ_PRIORITY_MASK GENMASK(31, 28)
+ u32 endpoint_req;
+ u32 reserved2[2];
+ u64 suspend_buf;
+ u64 protm_suspend_buf;
+ u32 config;
+ u32 iter_trace_config;
+};
+
+struct panthor_fw_csg_output_iface {
+ u32 ack;
+ u32 reserved1;
+ u32 doorbell_ack;
+ u32 cs_irq_req;
+ u32 status_endpoint_current;
+ u32 status_endpoint_req;
+
+#define CSG_STATUS_STATE_IS_IDLE BIT(0)
+ u32 status_state;
+ u32 resource_dep;
+};
+
+struct panthor_fw_global_control_iface {
+ u32 version;
+ u32 features;
+ u32 input_va;
+ u32 output_va;
+ u32 group_num;
+ u32 group_stride;
+ u32 perfcnt_size;
+ u32 instr_features;
+};
+
+struct panthor_fw_global_input_iface {
+#define GLB_HALT BIT(0)
+#define GLB_CFG_PROGRESS_TIMER BIT(1)
+#define GLB_CFG_ALLOC_EN BIT(2)
+#define GLB_CFG_POWEROFF_TIMER BIT(3)
+#define GLB_PROTM_ENTER BIT(4)
+#define GLB_PERFCNT_EN BIT(5)
+#define GLB_PERFCNT_SAMPLE BIT(6)
+#define GLB_COUNTER_EN BIT(7)
+#define GLB_PING BIT(8)
+#define GLB_FWCFG_UPDATE BIT(9)
+#define GLB_IDLE_EN BIT(10)
+#define GLB_SLEEP BIT(12)
+#define GLB_INACTIVE_COMPUTE BIT(20)
+#define GLB_INACTIVE_FRAGMENT BIT(21)
+#define GLB_INACTIVE_TILER BIT(22)
+#define GLB_PROTM_EXIT BIT(23)
+#define GLB_PERFCNT_THRESHOLD BIT(24)
+#define GLB_PERFCNT_OVERFLOW BIT(25)
+#define GLB_IDLE BIT(26)
+#define GLB_DBG_CSF BIT(30)
+#define GLB_DBG_HOST BIT(31)
+#define GLB_REQ_MASK GENMASK(10, 0)
+#define GLB_EVT_MASK GENMASK(26, 20)
+ u32 req;
+ u32 ack_irq_mask;
+ u32 doorbell_req;
+ u32 reserved1;
+ u32 progress_timer;
+
+#define GLB_TIMER_VAL(x) ((x) & GENMASK(30, 0))
+#define GLB_TIMER_SOURCE_GPU_COUNTER BIT(31)
+ u32 poweroff_timer;
+ u64 core_en_mask;
+ u32 reserved2;
+ u32 perfcnt_as;
+ u64 perfcnt_base;
+ u32 perfcnt_extract;
+ u32 reserved3[3];
+ u32 perfcnt_config;
+ u32 perfcnt_csg_select;
+ u32 perfcnt_fw_enable;
+ u32 perfcnt_csg_enable;
+ u32 perfcnt_csf_enable;
+ u32 perfcnt_shader_enable;
+ u32 perfcnt_tiler_enable;
+ u32 perfcnt_mmu_l2_enable;
+ u32 reserved4[8];
+ u32 idle_timer;
+};
+
+enum panthor_fw_halt_status {
+ PANTHOR_FW_HALT_OK = 0,
+ PANTHOR_FW_HALT_ON_PANIC = 0x4e,
+ PANTHOR_FW_HALT_ON_WATCHDOG_EXPIRATION = 0x4f,
+};
+
+struct panthor_fw_global_output_iface {
+ u32 ack;
+ u32 reserved1;
+ u32 doorbell_ack;
+ u32 reserved2;
+ u32 halt_status;
+ u32 perfcnt_status;
+ u32 perfcnt_insert;
+};
+
+/**
+ * struct panthor_fw_cs_iface - Firmware command stream slot interface
+ */
+struct panthor_fw_cs_iface {
+ /**
+ * @lock: Lock protecting access to the panthor_fw_cs_input_iface::req
+ * field.
+ *
+ * Needed so we can update the req field concurrently from the interrupt
+ * handler and the scheduler logic.
+ *
+ * TODO: Ideally we'd want to use a cmpxchg() to update the req, but FW
+ * interface sections are mapped uncached/write-combined right now, and
+ * using cmpxchg() on such mappings leads to SError faults. Revisit when
+ * we have 'SHARED' GPU mappings hooked up.
+ */
+ spinlock_t lock;
+
+ /**
+ * @control: Command stream slot control interface.
+ *
+ * Used to expose command stream slot properties.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_cs_control_iface *control;
+
+ /**
+ * @input: Command stream slot input interface.
+ *
+ * Used for host updates/events.
+ */
+ struct panthor_fw_cs_input_iface *input;
+
+ /**
+ * @output: Command stream slot output interface.
+ *
+ * Used for FW updates/events.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_cs_output_iface *output;
+};
+
+/**
+ * struct panthor_fw_csg_iface - Firmware command stream group slot interface
+ */
+struct panthor_fw_csg_iface {
+ /**
+ * @lock: Lock protecting access to the panthor_fw_csg_input_iface::req
+ * field.
+ *
+ * Needed so we can update the req field concurrently from the interrupt
+ * handler and the scheduler logic.
+ *
+ * TODO: Ideally we'd want to use a cmpxchg() to update the req, but FW
+ * interface sections are mapped uncached/write-combined right now, and
+ * using cmpxchg() on such mappings leads to SError faults. Revisit when
+ * we have 'SHARED' GPU mappings hooked up.
+ */
+ spinlock_t lock;
+
+ /**
+ * @control: Command stream group slot control interface.
+ *
+ * Used to expose command stream group slot properties.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_csg_control_iface *control;
+
+ /**
+ * @input: Command stream slot input interface.
+ *
+ * Used for host updates/events.
+ */
+ struct panthor_fw_csg_input_iface *input;
+
+ /**
+ * @output: Command stream group slot output interface.
+ *
+ * Used for FW updates/events.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_csg_output_iface *output;
+};
+
+/**
+ * struct panthor_fw_global_iface - Firmware global interface
+ */
+struct panthor_fw_global_iface {
+ /**
+ * @lock: Lock protecting access to the panthor_fw_global_input_iface::req
+ * field.
+ *
+ * Needed so we can update the req field concurrently from the interrupt
+ * handler and the scheduler/FW management logic.
+ *
+ * TODO: Ideally we'd want to use a cmpxchg() to update the req, but FW
+ * interface sections are mapped uncached/write-combined right now, and
+ * using cmpxchg() on such mappings leads to SError faults. Revisit when
+ * we have 'SHARED' GPU mappings hooked up.
+ */
+ spinlock_t lock;
+
+ /**
+ * @control: Global control interface.
+ *
+ * Used to expose global FW properties.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_global_control_iface *control;
+
+ /**
+ * @input: Global input interface.
+ *
+ * Used for host updates/events.
+ */
+ struct panthor_fw_global_input_iface *input;
+
+ /**
+ * @output: Global output interface.
+ *
+ * Used for FW updates/events.
+ *
+ * This interface is read-only.
+ */
+ const struct panthor_fw_global_output_iface *output;
+};
+
+/**
+ * panthor_fw_toggle_reqs() - Toggle acknowledge bits to send an event to the FW
+ * @__iface: The interface to operate on.
+ * @__in_reg: Name of the register to update in the input section of the interface.
+ * @__out_reg: Name of the register to take as a reference in the output section of the
+ * interface.
+ * @__mask: Mask to apply to the update.
+ *
+ * The Host -> FW event/message passing was designed to be lockless, with each side
+ * of the channel having its own writable section. Events are signaled as a
+ * difference between the host and FW sides in the req/ack registers: when a bit
+ * differs, there's an event pending; when they are the same, nothing needs
+ * attention.
+ *
+ * This helper updates the req register based on the current value of the ack
+ * register managed by the FW. Toggling a specific bit flags an event. In order for
+ * events to be re-evaluated, the interface doorbell needs to be rung.
+ *
+ * Concurrent accesses to the same req register are covered.
+ *
+ * Anything requiring atomic updates to multiple registers requires a dedicated lock.
+ */
+#define panthor_fw_toggle_reqs(__iface, __in_reg, __out_reg, __mask) \
+ do { \
+ u32 __cur_val, __new_val, __out_val; \
+ spin_lock(&(__iface)->lock); \
+ __cur_val = READ_ONCE((__iface)->input->__in_reg); \
+ __out_val = READ_ONCE((__iface)->output->__out_reg); \
+ __new_val = ((__out_val ^ (__mask)) & (__mask)) | (__cur_val & ~(__mask)); \
+ WRITE_ONCE((__iface)->input->__in_reg, __new_val); \
+ spin_unlock(&(__iface)->lock); \
+ } while (0)
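+
+/*
+ * Example (an illustrative sketch mirroring panthor_fw_ping_work()): flag a
+ * PING event on the global interface, ring the global doorbell so the FW
+ * re-evaluates its req registers, then wait for the acknowledgment:
+ *
+ *   panthor_fw_toggle_reqs(glb_iface, req, ack, GLB_PING);
+ *   gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+ *   ret = panthor_fw_glb_wait_acks(ptdev, GLB_PING, &acked, 100);
+ */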
+
+/**
+ * panthor_fw_update_reqs() - Update bits to reflect a configuration change
+ * @__iface: The interface to operate on.
+ * @__in_reg: Name of the register to update in the input section of the interface.
+ * @__val: Value to set.
+ * @__mask: Mask to apply to the update.
+ *
+ * Some configuration values get passed through req registers that are also used
+ * to send events to the FW. Since those req registers are also updated from the
+ * interrupt handler, special helpers are required to update the configuration
+ * part as well.
+ *
+ * Concurrent accesses to the same req register are covered.
+ *
+ * Anything requiring atomic updates to multiple registers requires a dedicated lock.
+ */
+#define panthor_fw_update_reqs(__iface, __in_reg, __val, __mask) \
+ do { \
+ u32 __cur_val, __new_val; \
+ spin_lock(&(__iface)->lock); \
+ __cur_val = READ_ONCE((__iface)->input->__in_reg); \
+ __new_val = (__cur_val & ~(__mask)) | ((__val) & (__mask)); \
+ WRITE_ONCE((__iface)->input->__in_reg, __new_val); \
+ spin_unlock(&(__iface)->lock); \
+ } while (0)
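+
+/*
+ * Example (an illustrative sketch mirroring panthor_fw_init_global_iface()):
+ * set the GLB_IDLE_EN configuration bit in the global req register to enable
+ * idle-event reporting:
+ *
+ *   panthor_fw_update_reqs(glb_iface, req, GLB_IDLE_EN, GLB_IDLE_EN);
+ */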
+
+struct panthor_fw_global_iface *
+panthor_fw_get_glb_iface(struct panthor_device *ptdev);
+
+struct panthor_fw_csg_iface *
+panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot);
+
+struct panthor_fw_cs_iface *
+panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot);
+
+int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_id, u32 req_mask,
+ u32 *acked, u32 timeout_ms);
+
+int panthor_fw_glb_wait_acks(struct panthor_device *ptdev, u32 req_mask, u32 *acked,
+ u32 timeout_ms);
+
+void panthor_fw_ring_csg_doorbells(struct panthor_device *ptdev, u32 csg_slot);
+
+struct panthor_kernel_bo *
+panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
+ struct panthor_fw_ringbuf_input_iface **input,
+ const struct panthor_fw_ringbuf_output_iface **output,
+ u32 *input_fw_va, u32 *output_fw_va);
+struct panthor_kernel_bo *
+panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size);
+
+struct panthor_vm *panthor_fw_vm(struct panthor_device *ptdev);
+
+void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang);
+int panthor_fw_post_reset(struct panthor_device *ptdev);
+
+static inline void panthor_fw_suspend(struct panthor_device *ptdev)
+{
+ panthor_fw_pre_reset(ptdev, false);
+}
+
+static inline int panthor_fw_resume(struct panthor_device *ptdev)
+{
+ return panthor_fw_post_reset(ptdev);
+}
+
+int panthor_fw_init(struct panthor_device *ptdev);
+void panthor_fw_unplug(struct panthor_device *ptdev);
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
new file mode 100644
index 000000000000..d6483266d0c2
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <drm/panthor_drm.h>
+
+#include "panthor_device.h"
+#include "panthor_gem.h"
+#include "panthor_mmu.h"
+
+static void panthor_gem_free_object(struct drm_gem_object *obj)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(obj);
+ struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;
+
+ drm_gem_free_mmap_offset(&bo->base.base);
+ mutex_destroy(&bo->gpuva_list_lock);
+ drm_gem_shmem_free(&bo->base);
+ drm_gem_object_put(vm_root_gem);
+}
+
+/**
+ * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
+ * @vm: The VM this BO was mapped to.
+ * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
+ * is skipped.
+ */
+void panthor_kernel_bo_destroy(struct panthor_vm *vm,
+ struct panthor_kernel_bo *bo)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ panthor_kernel_bo_vunmap(bo);
+
+ if (drm_WARN_ON(bo->obj->dev,
+ to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
+ goto out_free_bo;
+
+ ret = panthor_vm_unmap_range(vm, bo->va_node.start,
+ panthor_kernel_bo_size(bo));
+ if (ret)
+ goto out_free_bo;
+
+ panthor_vm_free_va(vm, &bo->va_node);
+ drm_gem_object_put(bo->obj);
+
+out_free_bo:
+ kfree(bo);
+}
+
+/**
+ * panthor_kernel_bo_create() - Create and map a GEM object to a VM
+ * @ptdev: Device.
+ * @vm: VM to map the GEM to. Must not be NULL.
+ * @size: Size of the buffer object.
+ * @bo_flags: Combination of drm_panthor_bo_flags flags.
+ * @vm_map_flags: Combination of drm_panthor_vm_bind_op_flags (only those
+ * that are related to map operations).
+ * @gpu_va: GPU address assigned when mapping to the VM.
+ * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
+ * automatically allocated.
+ *
+ * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
+ */
+struct panthor_kernel_bo *
+panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
+ size_t size, u32 bo_flags, u32 vm_map_flags,
+ u64 gpu_va)
+{
+ struct drm_gem_shmem_object *obj;
+ struct panthor_kernel_bo *kbo;
+ struct panthor_gem_object *bo;
+ int ret;
+
+ if (drm_WARN_ON(&ptdev->base, !vm))
+ return ERR_PTR(-EINVAL);
+
+ kbo = kzalloc(sizeof(*kbo), GFP_KERNEL);
+ if (!kbo)
+ return ERR_PTR(-ENOMEM);
+
+ obj = drm_gem_shmem_create(&ptdev->base, size);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto err_free_bo;
+ }
+
+ bo = to_panthor_bo(&obj->base);
+ size = obj->base.size;
+ kbo->obj = &obj->base;
+ bo->flags = bo_flags;
+
+ ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
+ if (ret)
+ goto err_put_obj;
+
+ ret = panthor_vm_map_bo_range(vm, bo, 0, size, kbo->va_node.start, vm_map_flags);
+ if (ret)
+ goto err_free_va;
+
+ bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
+ drm_gem_object_get(bo->exclusive_vm_root_gem);
+ bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
+ return kbo;
+
+err_free_va:
+ panthor_vm_free_va(vm, &kbo->va_node);
+
+err_put_obj:
+ drm_gem_object_put(&obj->base);
+
+err_free_bo:
+ kfree(kbo);
+ return ERR_PTR(ret);
+}
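+
+/*
+ * Example (an illustrative sketch, not part of this patch): allocate a kernel
+ * BO that is GPU-mapped at an automatically-chosen VA and never exposed to
+ * userspace through mmap. DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC is assumed to be
+ * one of the drm_panthor_vm_bind_op_flags map flags:
+ *
+ *   bo = panthor_kernel_bo_create(ptdev, vm, SZ_4K,
+ *                                 DRM_PANTHOR_BO_NO_MMAP,
+ *                                 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
+ *                                 PANTHOR_VM_KERNEL_AUTO_VA);
+ *   if (IS_ERR(bo))
+ *           return PTR_ERR(bo);
+ */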
+
+static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(obj);
+
+ /* Don't allow mmap on objects that have the NO_MMAP flag set. */
+ if (bo->flags & DRM_PANTHOR_BO_NO_MMAP)
+ return -EINVAL;
+
+ return drm_gem_shmem_object_mmap(obj, vma);
+}
+
+static struct dma_buf *
+panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
+{
+ /* We can't export GEMs that have an exclusive VM. */
+ if (to_panthor_bo(obj)->exclusive_vm_root_gem)
+ return ERR_PTR(-EINVAL);
+
+ return drm_gem_prime_export(obj, flags);
+}
+
+static const struct drm_gem_object_funcs panthor_gem_funcs = {
+ .free = panthor_gem_free_object,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = panthor_gem_mmap,
+ .export = panthor_gem_prime_export,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * panthor_gem_create_object - Implementation of driver->gem_create_object.
+ * @ddev: DRM device
+ * @size: Size in bytes of the memory the object will reference
+ *
+ * This lets the GEM helpers allocate object structs for us, and keep
+ * our BO stats correct.
+ */
+struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size)
+{
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ struct panthor_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &panthor_gem_funcs;
+ obj->base.map_wc = !ptdev->coherent;
+ mutex_init(&obj->gpuva_list_lock);
+ drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
+
+ return &obj->base.base;
+}
+
+/**
+ * panthor_gem_create_with_handle() - Create a GEM object and attach it to a handle.
+ * @file: DRM file.
+ * @ddev: DRM device.
+ * @exclusive_vm: Exclusive VM. Not NULL if the GEM object can't be shared.
+ * @size: Size of the GEM object to allocate. Updated with the actual
+ * (page-aligned) size on success.
+ * @flags: Combination of drm_panthor_bo_flags flags.
+ * @handle: Pointer used to return the handle referencing the new GEM object.
+ *
+ * Return: Zero on success, a negative error code otherwise.
+ */
+int
+panthor_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ struct panthor_vm *exclusive_vm,
+ u64 *size, u32 flags, u32 *handle)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem;
+ struct panthor_gem_object *bo;
+
+ shmem = drm_gem_shmem_create(ddev, *size);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ bo = to_panthor_bo(&shmem->base);
+ bo->flags = flags;
+
+ if (exclusive_vm) {
+ bo->exclusive_vm_root_gem = panthor_vm_root_gem(exclusive_vm);
+ drm_gem_object_get(bo->exclusive_vm_root_gem);
+ bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
+ }
+
+ /*
+ * Register the object in the file's handle IDR and return the
+ * resulting handle, which is what userspace uses to refer to the BO.
+ */
+ ret = drm_gem_handle_create(file, &shmem->base, handle);
+ if (!ret)
+ *size = bo->base.base.size;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put(&shmem->base);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
new file mode 100644
index 000000000000..3bccba394d00
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_GEM_H__
+#define __PANTHOR_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_mm.h>
+
+#include <linux/iosys-map.h>
+#include <linux/rwsem.h>
+
+struct panthor_vm;
+
+/**
+ * struct panthor_gem_object - Driver specific GEM object.
+ */
+struct panthor_gem_object {
+ /** @base: Inherit from drm_gem_shmem_object. */
+ struct drm_gem_shmem_object base;
+
+ /**
+ * @exclusive_vm_root_gem: Root GEM of the exclusive VM this GEM object
+ * is attached to.
+ *
+ * If @exclusive_vm_root_gem != NULL, any attempt to bind the GEM to a
+ * different VM will fail.
+ *
+ * All FW memory objects have this field set to the root GEM of the MCU
+ * VM.
+ */
+ struct drm_gem_object *exclusive_vm_root_gem;
+
+ /**
+ * @gpuva_list_lock: Custom GPUVA lock.
+ *
+ * Used to protect insertion of drm_gpuva elements to the
+ * drm_gem_object.gpuva.list list.
+ *
+ * We can't use the GEM resv for that, because drm_gpuva_link() is
+ * called in a dma-signaling path, where we're not allowed to take
+ * resv locks.
+ */
+ struct mutex gpuva_list_lock;
+
+ /** @flags: Combination of drm_panthor_bo_flags flags. */
+ u32 flags;
+};
+
+/**
+ * struct panthor_kernel_bo - Kernel buffer object.
+ *
+ * These objects are only manipulated by the kernel driver and not
+ * directly exposed to userspace. The GPU address of a kernel
+ * BO might be passed to userspace though.
+ */
+struct panthor_kernel_bo {
+ /**
+ * @obj: The GEM object backing this kernel buffer object.
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @va_node: VA space allocated to this GEM.
+ */
+ struct drm_mm_node va_node;
+
+ /**
+ * @kmap: Kernel CPU mapping of @obj.
+ */
+ void *kmap;
+};
+
+static inline
+struct panthor_gem_object *to_panthor_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct panthor_gem_object, base);
+}
+
+struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size);
+
+struct drm_gem_object *
+panthor_gem_prime_import_sg_table(struct drm_device *ddev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+int
+panthor_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ struct panthor_vm *exclusive_vm,
+ u64 *size, u32 flags, u32 *handle);
+
+static inline u64
+panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
+{
+ return bo->va_node.start;
+}
+
+static inline size_t
+panthor_kernel_bo_size(struct panthor_kernel_bo *bo)
+{
+ return bo->obj->size;
+}
+
+static inline int
+panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
+{
+ struct iosys_map map;
+ int ret;
+
+ if (bo->kmap)
+ return 0;
+
+ ret = drm_gem_vmap_unlocked(bo->obj, &map);
+ if (ret)
+ return ret;
+
+ bo->kmap = map.vaddr;
+ return 0;
+}
+
+static inline void
+panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
+{
+ if (bo->kmap) {
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
+
+ drm_gem_vunmap_unlocked(bo->obj, &map);
+ bo->kmap = NULL;
+ }
+}
+
+struct panthor_kernel_bo *
+panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
+ size_t size, u32 bo_flags, u32 vm_map_flags,
+ u64 gpu_va);
+
+void panthor_kernel_bo_destroy(struct panthor_vm *vm,
+ struct panthor_kernel_bo *bo);
+
+#endif /* __PANTHOR_GEM_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
new file mode 100644
index 000000000000..5251d8764e7d
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+
+#include "panthor_device.h"
+#include "panthor_gpu.h"
+#include "panthor_regs.h"
+
+/**
+ * struct panthor_gpu - GPU block management data.
+ */
+struct panthor_gpu {
+ /** @irq: GPU irq. */
+ struct panthor_irq irq;
+
+ /** @reqs_lock: Lock protecting access to pending_reqs. */
+ spinlock_t reqs_lock;
+
+ /** @pending_reqs: Pending GPU requests. */
+ u32 pending_reqs;
+
+ /** @reqs_acked: GPU request wait queue. */
+ wait_queue_head_t reqs_acked;
+};
+
+/**
+ * struct panthor_model - GPU model description
+ */
+struct panthor_model {
+ /** @name: Model name. */
+ const char *name;
+
+ /** @arch_major: Major version number of architecture. */
+ u8 arch_major;
+
+ /** @product_major: Major version number of product. */
+ u8 product_major;
+};
+
+/**
+ * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
+ * by a combination of the major architecture version and the major product
+ * version.
+ * @_name: Name for the GPU model.
+ * @_arch_major: Architecture major.
+ * @_product_major: Product major.
+ */
+#define GPU_MODEL(_name, _arch_major, _product_major) \
+{\
+ .name = __stringify(_name), \
+ .arch_major = _arch_major, \
+ .product_major = _product_major, \
+}
+
+static const struct panthor_model gpu_models[] = {
+ GPU_MODEL(g610, 10, 7),
+ {},
+};
+
+#define GPU_INTERRUPTS_MASK \
+ (GPU_IRQ_FAULT | \
+ GPU_IRQ_PROTM_FAULT | \
+ GPU_IRQ_RESET_COMPLETED | \
+ GPU_IRQ_CLEAN_CACHES_COMPLETED)
+
+static void panthor_gpu_init_info(struct panthor_device *ptdev)
+{
+ const struct panthor_model *model;
+ u32 arch_major, product_major;
+ u32 major, minor, status;
+ unsigned int i;
+
+ ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
+ ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
+ ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
+ ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
+ ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
+ ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
+ ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
+ ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
+ ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
+ ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
+ ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
+ ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
+ for (i = 0; i < 4; i++)
+ ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
+
+ ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
+
+ ptdev->gpu_info.shader_present = gpu_read(ptdev, GPU_SHADER_PRESENT_LO);
+ ptdev->gpu_info.shader_present |= (u64)gpu_read(ptdev, GPU_SHADER_PRESENT_HI) << 32;
+
+ ptdev->gpu_info.tiler_present = gpu_read(ptdev, GPU_TILER_PRESENT_LO);
+ ptdev->gpu_info.tiler_present |= (u64)gpu_read(ptdev, GPU_TILER_PRESENT_HI) << 32;
+
+ ptdev->gpu_info.l2_present = gpu_read(ptdev, GPU_L2_PRESENT_LO);
+ ptdev->gpu_info.l2_present |= (u64)gpu_read(ptdev, GPU_L2_PRESENT_HI) << 32;
+
+ arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
+ product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
+ major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
+ minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
+ status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
+
+ for (model = gpu_models; model->name; model++) {
+ if (model->arch_major == arch_major &&
+ model->product_major == product_major)
+ break;
+ }
+
+ drm_info(&ptdev->base,
+ "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
+ major, minor, status);
+
+ drm_info(&ptdev->base,
+ "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
+ ptdev->gpu_info.l2_features,
+ ptdev->gpu_info.tiler_features,
+ ptdev->gpu_info.mem_features,
+ ptdev->gpu_info.mmu_features,
+ ptdev->gpu_info.as_present);
+
+ drm_info(&ptdev->base,
+ "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
+ ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
+ ptdev->gpu_info.tiler_present);
+}
+
+static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
+{
+ if (status & GPU_IRQ_FAULT) {
+ u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
+ u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
+ gpu_read(ptdev, GPU_FAULT_ADDR_LO);
+
+ drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
+ fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
+ address);
+ }
+ if (status & GPU_IRQ_PROTM_FAULT)
+ drm_warn(&ptdev->base, "GPU Fault in protected mode\n");
+
+ spin_lock(&ptdev->gpu->reqs_lock);
+ if (status & ptdev->gpu->pending_reqs) {
+ ptdev->gpu->pending_reqs &= ~status;
+ wake_up_all(&ptdev->gpu->reqs_acked);
+ }
+ spin_unlock(&ptdev->gpu->reqs_lock);
+}
+PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);
+
+/**
+ * panthor_gpu_unplug() - Called when the GPU is unplugged.
+ * @ptdev: Device to unplug.
+ */
+void panthor_gpu_unplug(struct panthor_device *ptdev)
+{
+ unsigned long flags;
+
+ /* Make sure the IRQ handler is not running after that point. */
+ panthor_gpu_irq_suspend(&ptdev->gpu->irq);
+
+ /* Wake-up all waiters. */
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ ptdev->gpu->pending_reqs = 0;
+ wake_up_all(&ptdev->gpu->reqs_acked);
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+}
+
+/**
+ * panthor_gpu_init() - Initialize the GPU block
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_init(struct panthor_device *ptdev)
+{
+ struct panthor_gpu *gpu;
+ u32 pa_bits;
+ int ret, irq;
+
+ gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
+ if (!gpu)
+ return -ENOMEM;
+
+ spin_lock_init(&gpu->reqs_lock);
+ init_waitqueue_head(&gpu->reqs_acked);
+ ptdev->gpu = gpu;
+ panthor_gpu_init_info(ptdev);
+
+ dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
+ pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
+ ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
+ if (irq < 0)
+ return irq;
+
+ ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
+ * @ptdev: Device.
+ * @blk_name: Block name.
+ * @pwroff_reg: Power-off register for this block.
+ * @pwrtrans_reg: Power transition register for this block.
+ * @mask: Sub-elements to power-off.
+ * @timeout_us: Timeout in microseconds.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_block_power_off(struct panthor_device *ptdev,
+ const char *blk_name,
+ u32 pwroff_reg, u32 pwrtrans_reg,
+ u64 mask, u32 timeout_us)
+{
+ u32 val, i;
+ int ret;
+
+ for (i = 0; i < 2; i++) {
+ u32 mask32 = mask >> (i * 32);
+
+ if (!mask32)
+ continue;
+
+ ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
+ val, !(mask32 & val),
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
+ blk_name, mask);
+ return ret;
+ }
+ }
+
+ if (mask & GENMASK(31, 0))
+ gpu_write(ptdev, pwroff_reg, mask);
+
+ if (mask >> 32)
+ gpu_write(ptdev, pwroff_reg + 4, mask >> 32);
+
+ for (i = 0; i < 2; i++) {
+ u32 mask32 = mask >> (i * 32);
+
+ if (!mask32)
+ continue;
+
+ ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
+ val, !(mask32 & val),
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
+ blk_name, mask);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
+ * @ptdev: Device.
+ * @blk_name: Block name.
+ * @pwron_reg: Power-on register for this block.
+ * @pwrtrans_reg: Power transition register for this block.
+ * @rdy_reg: Power transition ready register.
+ * @mask: Sub-elements to power-on.
+ * @timeout_us: Timeout in microseconds.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_block_power_on(struct panthor_device *ptdev,
+ const char *blk_name,
+ u32 pwron_reg, u32 pwrtrans_reg,
+ u32 rdy_reg, u64 mask, u32 timeout_us)
+{
+ u32 val, i;
+ int ret;
+
+ for (i = 0; i < 2; i++) {
+ u32 mask32 = mask >> (i * 32);
+
+ if (!mask32)
+ continue;
+
+ ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
+ val, !(mask32 & val),
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
+ blk_name, mask);
+ return ret;
+ }
+ }
+
+ if (mask & GENMASK(31, 0))
+ gpu_write(ptdev, pwron_reg, mask);
+
+ if (mask >> 32)
+ gpu_write(ptdev, pwron_reg + 4, mask >> 32);
+
+ for (i = 0; i < 2; i++) {
+ u32 mask32 = mask >> (i * 32);
+
+ if (!mask32)
+ continue;
+
+ ret = readl_relaxed_poll_timeout(ptdev->iomem + rdy_reg + (i * 4),
+ val, (mask32 & val) == mask32,
+ 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
+ blk_name, mask);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_l2_power_on() - Power-on the L2-cache
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
+{
+ if (ptdev->gpu_info.l2_present != 1) {
+ /*
+ * Only support one core group now.
+ * ~(l2_present - 1) unsets all bits in l2_present except
+ * the bottom bit. (l2_present - 2) has all the bits in
+ * the first core group set. AND them together to generate
+ * a mask of cores in the first core group.
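+ *
+ * Worked example: with l2_present == 0b101 (two core groups),
+ * ~(l2_present - 1) == ~0b100 and (l2_present - 2) == 0b011, so
+ * core_mask == 0b011, i.e. the cores of the first core group.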
+ */
+ u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
+ (ptdev->gpu_info.l2_present - 2);
+ drm_info_once(&ptdev->base, "using only 1st core group (%lu cores from %lu)\n",
+ hweight64(core_mask),
+ hweight64(ptdev->gpu_info.shader_present));
+ }
+
+ return panthor_gpu_power_on(ptdev, L2, 1, 20000);
+}
+
+/**
+ * panthor_gpu_flush_caches() - Flush caches
+ * @ptdev: Device.
+ * @l2: L2 flush type.
+ * @lsc: LSC flush type.
+ * @other: Other flush type.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_flush_caches(struct panthor_device *ptdev,
+ u32 l2, u32 lsc, u32 other)
+{
+ bool timedout = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ if (!drm_WARN_ON(&ptdev->base,
+ ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
+ ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
+ gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
+ }
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+
+ if (!wait_event_timeout(ptdev->gpu->reqs_acked,
+ !(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
+ msecs_to_jiffies(100))) {
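+ /* Check the raw IRQ status in case the completion IRQ was lost. */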
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
+ !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
+ timedout = true;
+ else
+ ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+ }
+
+ if (timedout) {
+ drm_err(&ptdev->base, "Flush caches timeout");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_soft_reset() - Issue a soft-reset
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_gpu_soft_reset(struct panthor_device *ptdev)
+{
+ bool timedout = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ if (!drm_WARN_ON(&ptdev->base,
+ ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
+ ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
+ gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
+ gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
+ }
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+
+ if (!wait_event_timeout(ptdev->gpu->reqs_acked,
+ !(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
+ msecs_to_jiffies(100))) {
+ spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
+ if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
+ !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
+ timedout = true;
+ else
+ ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
+ spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
+ }
+
+ if (timedout) {
+ drm_err(&ptdev->base, "Soft reset timeout");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_gpu_suspend() - Suspend the GPU block.
+ * @ptdev: Device.
+ *
+ * Suspend the GPU irq. This should be called last in the suspend procedure,
+ * after all other blocks have been suspended.
+ */
+void panthor_gpu_suspend(struct panthor_device *ptdev)
+{
+ /*
+ * It may be preferable to simply power down the L2, but for now just
+ * soft-reset which will leave the L2 powered down.
+ */
+ panthor_gpu_soft_reset(ptdev);
+ panthor_gpu_irq_suspend(&ptdev->gpu->irq);
+}
+
+/**
+ * panthor_gpu_resume() - Resume the GPU block.
+ * @ptdev: Device.
+ *
+ * Resume the IRQ handler and power-on the L2-cache.
+ * The FW takes care of powering the other blocks.
+ */
+void panthor_gpu_resume(struct panthor_device *ptdev)
+{
+ panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
+ panthor_gpu_l2_power_on(ptdev);
+}
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h
new file mode 100644
index 000000000000..bba7555dd3c6
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_gpu.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANTHOR_GPU_H__
+#define __PANTHOR_GPU_H__
+
+struct panthor_device;
+
+int panthor_gpu_init(struct panthor_device *ptdev);
+void panthor_gpu_unplug(struct panthor_device *ptdev);
+void panthor_gpu_suspend(struct panthor_device *ptdev);
+void panthor_gpu_resume(struct panthor_device *ptdev);
+
+int panthor_gpu_block_power_on(struct panthor_device *ptdev,
+ const char *blk_name,
+ u32 pwron_reg, u32 pwrtrans_reg,
+ u32 rdy_reg, u64 mask, u32 timeout_us);
+int panthor_gpu_block_power_off(struct panthor_device *ptdev,
+ const char *blk_name,
+ u32 pwroff_reg, u32 pwrtrans_reg,
+ u64 mask, u32 timeout_us);
+
+/**
+ * panthor_gpu_power_on() - Power on a GPU block.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+#define panthor_gpu_power_on(ptdev, type, mask, timeout_us) \
+ panthor_gpu_block_power_on(ptdev, #type, \
+ type ## _PWRON_LO, \
+ type ## _PWRTRANS_LO, \
+ type ## _READY_LO, \
+ mask, timeout_us)
+
+/**
+ * panthor_gpu_power_off() - Power off a GPU block.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+#define panthor_gpu_power_off(ptdev, type, mask, timeout_us) \
+ panthor_gpu_block_power_off(ptdev, #type, \
+ type ## _PWROFF_LO, \
+ type ## _PWRTRANS_LO, \
+ mask, timeout_us)
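+
+/*
+ * For instance, panthor_gpu_power_on(ptdev, L2, 1, 20000) expands to
+ * panthor_gpu_block_power_on(ptdev, "L2", L2_PWRON_LO, L2_PWRTRANS_LO,
+ * L2_READY_LO, 1, 20000): the block name and register offsets are all
+ * derived from the L2 token (the timeout value here is arbitrary).
+ */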
+
+int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
+int panthor_gpu_flush_caches(struct panthor_device *ptdev,
+ u32 l2, u32 lsc, u32 other);
+int panthor_gpu_soft_reset(struct panthor_device *ptdev);
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
new file mode 100644
index 000000000000..143fa35f2e74
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2023 Collabora ltd. */
+
+#include <linux/iosys-map.h>
+#include <linux/rwsem.h>
+
+#include <drm/panthor_drm.h>
+
+#include "panthor_device.h"
+#include "panthor_gem.h"
+#include "panthor_heap.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+
+/*
+ * The GPU heap context is an opaque structure used by the GPU to track the
+ * heap allocations. The driver should only touch it to initialize it (zero all
+ * fields). Because the CPU and GPU can both access this structure, it is
+ * required to be GPU cache line aligned.
+ */
+#define HEAP_CONTEXT_SIZE 32
+
+/**
+ * struct panthor_heap_chunk_header - Heap chunk header
+ */
+struct panthor_heap_chunk_header {
+ /**
+ * @next: Next heap chunk in the list.
+ *
+ * This is a GPU VA.
+ */
+ u64 next;
+
+ /** @unknown: MBZ. */
+ u32 unknown[14];
+};
+
+/**
+ * struct panthor_heap_chunk - Structure used to keep track of allocated heap chunks.
+ */
+struct panthor_heap_chunk {
+ /** @node: Used to insert the heap chunk in panthor_heap::chunks. */
+ struct list_head node;
+
+ /** @bo: Buffer object backing the heap chunk. */
+ struct panthor_kernel_bo *bo;
+};
+
+/**
+ * struct panthor_heap - Structure used to manage tiler heap contexts.
+ */
+struct panthor_heap {
+ /** @chunks: List containing all heap chunks allocated so far. */
+ struct list_head chunks;
+
+ /** @lock: Lock protecting insertion in the chunks list. */
+ struct mutex lock;
+
+ /** @chunk_size: Size of each chunk. */
+ u32 chunk_size;
+
+ /** @max_chunks: Maximum number of chunks. */
+ u32 max_chunks;
+
+ /**
+ * @target_in_flight: Number of in-flight render passes after which
+ * we'd let the FW wait for fragment jobs to finish instead of allocating new chunks.
+ */
+ u32 target_in_flight;
+
+ /** @chunk_count: Number of heap chunks currently allocated. */
+ u32 chunk_count;
+};
+
+#define MAX_HEAPS_PER_POOL 128
+
+/**
+ * struct panthor_heap_pool - Pool of heap contexts
+ *
+ * The pool is attached to a panthor_file and can't be shared across processes.
+ */
+struct panthor_heap_pool {
+ /** @refcount: Reference count. */
+ struct kref refcount;
+
+ /** @ptdev: Device. */
+ struct panthor_device *ptdev;
+
+ /** @vm: VM this pool is bound to. */
+ struct panthor_vm *vm;
+
+ /** @lock: Lock protecting access to @xa. */
+ struct rw_semaphore lock;
+
+ /** @xa: Array storing panthor_heap objects. */
+ struct xarray xa;
+
+ /** @gpu_contexts: Buffer object containing the GPU heap contexts. */
+ struct panthor_kernel_bo *gpu_contexts;
+};
+
+static int panthor_heap_ctx_stride(struct panthor_device *ptdev)
+{
+ u32 l2_features = ptdev->gpu_info.l2_features;
+ u32 gpu_cache_line_size = GPU_L2_FEATURES_LINE_SIZE(l2_features);
+
+ return ALIGN(HEAP_CONTEXT_SIZE, gpu_cache_line_size);
+}
+
+static int panthor_get_heap_ctx_offset(struct panthor_heap_pool *pool, int id)
+{
+ return panthor_heap_ctx_stride(pool->ptdev) * id;
+}
+
+static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
+{
+ return pool->gpu_contexts->kmap +
+ panthor_get_heap_ctx_offset(pool, id);
+}
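+
+/*
+ * For example, with a 64-byte GPU cache line, the stride is
+ * ALIGN(HEAP_CONTEXT_SIZE, 64) = 64 bytes, so heap context #3 starts at
+ * offset 3 * 64 = 192 within the gpu_contexts buffer.
+ */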
+
+static void panthor_free_heap_chunk(struct panthor_vm *vm,
+ struct panthor_heap *heap,
+ struct panthor_heap_chunk *chunk)
+{
+ mutex_lock(&heap->lock);
+ list_del(&chunk->node);
+ heap->chunk_count--;
+ mutex_unlock(&heap->lock);
+
+ panthor_kernel_bo_destroy(vm, chunk->bo);
+ kfree(chunk);
+}
+
+static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
+ struct panthor_vm *vm,
+ struct panthor_heap *heap,
+ bool initial_chunk)
+{
+ struct panthor_heap_chunk *chunk;
+ struct panthor_heap_chunk_header *hdr;
+ int ret;
+
+ chunk = kmalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ return -ENOMEM;
+
+ chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(chunk->bo)) {
+ ret = PTR_ERR(chunk->bo);
+ goto err_free_chunk;
+ }
+
+ ret = panthor_kernel_bo_vmap(chunk->bo);
+ if (ret)
+ goto err_destroy_bo;
+
+ hdr = chunk->bo->kmap;
+ memset(hdr, 0, sizeof(*hdr));
+
+ if (initial_chunk && !list_empty(&heap->chunks)) {
+ struct panthor_heap_chunk *prev_chunk;
+ u64 prev_gpuva;
+
+ prev_chunk = list_first_entry(&heap->chunks,
+ struct panthor_heap_chunk,
+ node);
+
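+ /* The next field packs the 4k-aligned GPU VA of the previous
+ * chunk in bits [63:12] and the chunk size in 4k units in the
+ * low bits, e.g. a 2M chunk at VA 0x80000000 encodes as
+ * 0x80000000 | (SZ_2M >> 12) = 0x80000200.
+ */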
+ prev_gpuva = panthor_kernel_bo_gpuva(prev_chunk->bo);
+ hdr->next = (prev_gpuva & GENMASK_ULL(63, 12)) |
+ (heap->chunk_size >> 12);
+ }
+
+ panthor_kernel_bo_vunmap(chunk->bo);
+
+ mutex_lock(&heap->lock);
+ list_add(&chunk->node, &heap->chunks);
+ heap->chunk_count++;
+ mutex_unlock(&heap->lock);
+
+ return 0;
+
+err_destroy_bo:
+ panthor_kernel_bo_destroy(vm, chunk->bo);
+
+err_free_chunk:
+ kfree(chunk);
+
+ return ret;
+}
+
+static void panthor_free_heap_chunks(struct panthor_vm *vm,
+ struct panthor_heap *heap)
+{
+ struct panthor_heap_chunk *chunk, *tmp;
+
+ list_for_each_entry_safe(chunk, tmp, &heap->chunks, node)
+ panthor_free_heap_chunk(vm, heap, chunk);
+}
+
+static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
+ struct panthor_vm *vm,
+ struct panthor_heap *heap,
+ u32 chunk_count)
+{
+ int ret;
+ u32 i;
+
+ for (i = 0; i < chunk_count; i++) {
+ ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle)
+{
+ struct panthor_heap *heap;
+
+ heap = xa_erase(&pool->xa, handle);
+ if (!heap)
+ return -EINVAL;
+
+ panthor_free_heap_chunks(pool->vm, heap);
+ mutex_destroy(&heap->lock);
+ kfree(heap);
+ return 0;
+}
+
+/**
+ * panthor_heap_destroy() - Destroy a heap context
+ * @pool: Pool this context belongs to.
+ * @handle: Handle returned by panthor_heap_create().
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle)
+{
+ int ret;
+
+ down_write(&pool->lock);
+ ret = panthor_heap_destroy_locked(pool, handle);
+ up_write(&pool->lock);
+
+ return ret;
+}
+
+/**
+ * panthor_heap_create() - Create a heap context
+ * @pool: Pool to instantiate the heap context from.
+ * @initial_chunk_count: Number of chunks allocated at initialization time.
+ * Must be at least 1.
+ * @chunk_size: The size of each chunk. Must be a power of two between 256k
+ * and 2M.
+ * @max_chunks: Maximum number of chunks that can be allocated.
+ * @target_in_flight: Maximum number of in-flight render passes.
+ * @heap_ctx_gpu_va: Pointer holding the GPU address of the allocated heap
+ * context.
+ * @first_chunk_gpu_va: Pointer holding the GPU address of the first chunk
+ * assigned to the heap context.
+ *
+ * Return: a positive handle on success, a negative error code otherwise.
+ */
+int panthor_heap_create(struct panthor_heap_pool *pool,
+ u32 initial_chunk_count,
+ u32 chunk_size,
+ u32 max_chunks,
+ u32 target_in_flight,
+ u64 *heap_ctx_gpu_va,
+ u64 *first_chunk_gpu_va)
+{
+ struct panthor_heap *heap;
+ struct panthor_heap_chunk *first_chunk;
+ struct panthor_vm *vm;
+ int ret = 0;
+ u32 id;
+
+ if (initial_chunk_count == 0)
+ return -EINVAL;
+
+ if (hweight32(chunk_size) != 1 ||
+ chunk_size < SZ_256K || chunk_size > SZ_2M)
+ return -EINVAL;
+
+ down_read(&pool->lock);
+ vm = panthor_vm_get(pool->vm);
+ up_read(&pool->lock);
+
+ /* The pool has been destroyed, we can't create a new heap. */
+ if (!vm)
+ return -EINVAL;
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap) {
+ ret = -ENOMEM;
+ goto err_put_vm;
+ }
+
+ mutex_init(&heap->lock);
+ INIT_LIST_HEAD(&heap->chunks);
+ heap->chunk_size = chunk_size;
+ heap->max_chunks = max_chunks;
+ heap->target_in_flight = target_in_flight;
+
+ ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap,
+ initial_chunk_count);
+ if (ret)
+ goto err_free_heap;
+
+ first_chunk = list_first_entry(&heap->chunks,
+ struct panthor_heap_chunk,
+ node);
+ *first_chunk_gpu_va = panthor_kernel_bo_gpuva(first_chunk->bo);
+
+ down_write(&pool->lock);
+ /* The pool has been destroyed, we can't create a new heap. */
+ if (!pool->vm) {
+ ret = -EINVAL;
+ } else {
+ ret = xa_alloc(&pool->xa, &id, heap, XA_LIMIT(1, MAX_HEAPS_PER_POOL), GFP_KERNEL);
+ if (!ret) {
+ void *gpu_ctx = panthor_get_heap_ctx(pool, id);
+
+ memset(gpu_ctx, 0, panthor_heap_ctx_stride(pool->ptdev));
+ *heap_ctx_gpu_va = panthor_kernel_bo_gpuva(pool->gpu_contexts) +
+ panthor_get_heap_ctx_offset(pool, id);
+ }
+ }
+ up_write(&pool->lock);
+
+ if (ret)
+ goto err_free_heap;
+
+ panthor_vm_put(vm);
+ return id;
+
+err_free_heap:
+ panthor_free_heap_chunks(pool->vm, heap);
+ mutex_destroy(&heap->lock);
+ kfree(heap);
+
+err_put_vm:
+ panthor_vm_put(vm);
+ return ret;
+}
+
+/**
+ * panthor_heap_return_chunk() - Return an unused heap chunk
+ * @pool: The pool this heap belongs to.
+ * @heap_gpu_va: The GPU address of the heap context.
+ * @chunk_gpu_va: The chunk VA to return.
+ *
+ * This function is used when a chunk allocated with panthor_heap_grow()
+ * couldn't be linked to the heap context through the FW interface because
+ * the group requesting the allocation was scheduled out in the meantime.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
+ u64 heap_gpu_va,
+ u64 chunk_gpu_va)
+{
+ u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
+ u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
+ struct panthor_heap_chunk *chunk, *tmp, *removed = NULL;
+ struct panthor_heap *heap;
+ int ret;
+
+ if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
+ return -EINVAL;
+
+ down_read(&pool->lock);
+ heap = xa_load(&pool->xa, heap_id);
+ if (!heap) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ chunk_gpu_va &= GENMASK_ULL(63, 12);
+
+ mutex_lock(&heap->lock);
+ list_for_each_entry_safe(chunk, tmp, &heap->chunks, node) {
+ if (panthor_kernel_bo_gpuva(chunk->bo) == chunk_gpu_va) {
+ removed = chunk;
+ list_del(&chunk->node);
+ heap->chunk_count--;
+ break;
+ }
+ }
+ mutex_unlock(&heap->lock);
+
+ if (removed) {
+ panthor_kernel_bo_destroy(pool->vm, chunk->bo);
+ kfree(chunk);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+out_unlock:
+ up_read(&pool->lock);
+ return ret;
+}
+
+/**
+ * panthor_heap_grow() - Make a heap context grow.
+ * @pool: The pool this heap belongs to.
+ * @heap_gpu_va: The GPU address of the heap context.
+ * @renderpasses_in_flight: Number of render passes currently in-flight.
+ * @pending_frag_count: Number of fragment jobs waiting for execution/completion.
+ * @new_chunk_gpu_va: Pointer used to return the chunk VA.
+ *
+ * Return: 0 on success, -EBUSY if the heap shouldn't grow right now (the FW
+ * is expected to wait for in-flight render passes to finish instead), a
+ * negative error code otherwise.
+ */
+int panthor_heap_grow(struct panthor_heap_pool *pool,
+ u64 heap_gpu_va,
+ u32 renderpasses_in_flight,
+ u32 pending_frag_count,
+ u64 *new_chunk_gpu_va)
+{
+ u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
+ u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
+ struct panthor_heap_chunk *chunk;
+ struct panthor_heap *heap;
+ int ret;
+
+ if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
+ return -EINVAL;
+
+ down_read(&pool->lock);
+ heap = xa_load(&pool->xa, heap_id);
+ if (!heap) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* If we reached the target in-flight render passes, or if we
+ * reached the maximum number of chunks, let the FW figure out another way to
+ * find some memory (wait for render passes to finish, or call the exception
+ * handler provided by the userspace driver, if any).
+ */
+ if (renderpasses_in_flight > heap->target_in_flight ||
+ (pending_frag_count > 0 && heap->chunk_count >= heap->max_chunks)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ } else if (heap->chunk_count >= heap->max_chunks) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /* FIXME: panthor_alloc_heap_chunk() triggers a kernel BO creation,
+ * which goes through the blocking allocation path. Ultimately, we
+ * want a non-blocking allocation, so we can immediately report to the
+ * FW when the system is running out of memory. In that case, the FW
+ * can call a user-provided exception handler, which might try to free
+ * some tiler memory by issuing an intermediate fragment job. If the
+ * exception handler can't do anything, it will flag the queue as
+ * faulty so the job that triggered this tiler chunk allocation and all
+ * further jobs in this queue fail immediately instead of having to
+ * wait for the job timeout.
+ */
+ ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false);
+ if (ret)
+ goto out_unlock;
+
+ chunk = list_first_entry(&heap->chunks,
+ struct panthor_heap_chunk,
+ node);
+ *new_chunk_gpu_va = (panthor_kernel_bo_gpuva(chunk->bo) & GENMASK_ULL(63, 12)) |
+ (heap->chunk_size >> 12);
+ ret = 0;
+
+out_unlock:
+ up_read(&pool->lock);
+ return ret;
+}
+
+static void panthor_heap_pool_release(struct kref *refcount)
+{
+ struct panthor_heap_pool *pool =
+ container_of(refcount, struct panthor_heap_pool, refcount);
+
+ xa_destroy(&pool->xa);
+ kfree(pool);
+}
+
+/**
+ * panthor_heap_pool_put() - Release a heap pool reference
+ * @pool: Pool to release the reference on. Can be NULL.
+ */
+void panthor_heap_pool_put(struct panthor_heap_pool *pool)
+{
+ if (pool)
+ kref_put(&pool->refcount, panthor_heap_pool_release);
+}
+
+/**
+ * panthor_heap_pool_get() - Get a heap pool reference
+ * @pool: Pool to get the reference on. Can be NULL.
+ *
+ * Return: @pool.
+ */
+struct panthor_heap_pool *
+panthor_heap_pool_get(struct panthor_heap_pool *pool)
+{
+ if (pool)
+ kref_get(&pool->refcount);
+
+ return pool;
+}
+
+/**
+ * panthor_heap_pool_create() - Create a heap pool
+ * @ptdev: Device.
+ * @vm: The VM this heap pool will be attached to.
+ *
+ * Heap pools might contain up to 128 heap contexts, and are per-VM.
+ *
+ * Return: A valid pointer on success, an ERR_PTR() encoding the error otherwise.
+ */
+struct panthor_heap_pool *
+panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
+{
+ size_t bosize = ALIGN(MAX_HEAPS_PER_POOL *
+ panthor_heap_ctx_stride(ptdev),
+ 4096);
+ struct panthor_heap_pool *pool;
+ int ret = 0;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ /* We want a weak ref here: the heap pool belongs to the VM, so we're
+ * sure that, as long as the heap pool exists, the VM exists too.
+ */
+ pool->vm = vm;
+ pool->ptdev = ptdev;
+ init_rwsem(&pool->lock);
+ xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1);
+ kref_init(&pool->refcount);
+
+ pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(pool->gpu_contexts)) {
+ ret = PTR_ERR(pool->gpu_contexts);
+ goto err_destroy_pool;
+ }
+
+ ret = panthor_kernel_bo_vmap(pool->gpu_contexts);
+ if (ret)
+ goto err_destroy_pool;
+
+ return pool;
+
+err_destroy_pool:
+ panthor_heap_pool_destroy(pool);
+ return ERR_PTR(ret);
+}
+
+/**
+ * panthor_heap_pool_destroy() - Destroy a heap pool.
+ * @pool: Pool to destroy.
+ *
+ * This function destroys all heap contexts and their resources, thus
+ * preventing any further use of the heap contexts or the chunks attached
+ * to them.
+ *
+ * If the GPU still has access to some heap contexts, a fault should be
+ * triggered, which should flag the command stream groups using these
+ * contexts as faulty.
+ *
+ * The heap pool object is only released when all references to this pool
+ * are released.
+ */
+void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
+{
+ struct panthor_heap *heap;
+ unsigned long i;
+
+ if (!pool)
+ return;
+
+ down_write(&pool->lock);
+ xa_for_each(&pool->xa, i, heap)
+ drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
+
+ if (!IS_ERR_OR_NULL(pool->gpu_contexts))
+ panthor_kernel_bo_destroy(pool->vm, pool->gpu_contexts);
+
+ /* Reflects the fact the pool has been destroyed. */
+ pool->vm = NULL;
+ up_write(&pool->lock);
+
+ panthor_heap_pool_put(pool);
+}
diff --git a/drivers/gpu/drm/panthor/panthor_heap.h b/drivers/gpu/drm/panthor/panthor_heap.h
new file mode 100644
index 000000000000..25a5f2bba445
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_heap.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_HEAP_H__
+#define __PANTHOR_HEAP_H__
+
+#include <linux/types.h>
+
+struct panthor_device;
+struct panthor_heap_pool;
+struct panthor_vm;
+
+int panthor_heap_create(struct panthor_heap_pool *pool,
+ u32 initial_chunk_count,
+ u32 chunk_size,
+ u32 max_chunks,
+ u32 target_in_flight,
+ u64 *heap_ctx_gpu_va,
+ u64 *first_chunk_gpu_va);
+int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle);
+
+struct panthor_heap_pool *
+panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm);
+void panthor_heap_pool_destroy(struct panthor_heap_pool *pool);
+
+struct panthor_heap_pool *
+panthor_heap_pool_get(struct panthor_heap_pool *pool);
+void panthor_heap_pool_put(struct panthor_heap_pool *pool);
+
+int panthor_heap_grow(struct panthor_heap_pool *pool,
+ u64 heap_gpu_va,
+ u32 renderpasses_in_flight,
+ u32 pending_frag_count,
+ u64 *new_chunk_gpu_va);
+int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
+ u64 heap_gpu_va,
+ u64 chunk_gpu_va);
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
new file mode 100644
index 000000000000..fa0a002b1016
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -0,0 +1,2774 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_gpuvm.h>
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panthor_drm.h>
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/kmemleak.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/shmem_fs.h>
+#include <linux/sizes.h>
+
+#include "panthor_device.h"
+#include "panthor_gem.h"
+#include "panthor_heap.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+#define MAX_AS_SLOTS 32
+
+struct panthor_vm;
+
+/**
+ * struct panthor_as_slot - Address space slot
+ */
+struct panthor_as_slot {
+ /** @vm: VM bound to this slot. NULL if no VM is bound. */
+ struct panthor_vm *vm;
+};
+
+/**
+ * struct panthor_mmu - MMU related data
+ */
+struct panthor_mmu {
+ /** @irq: The MMU irq. */
+ struct panthor_irq irq;
+
+ /** @as: Address space related fields.
+ *
+ * The GPU has a limited number of address space (AS) slots, forcing
+ * us to re-assign them on demand.
+ */
+ struct {
+ /** @slots_lock: Lock protecting access to all other AS fields. */
+ struct mutex slots_lock;
+
+ /** @alloc_mask: Bitmask encoding the allocated slots. */
+ unsigned long alloc_mask;
+
+ /** @faulty_mask: Bitmask encoding the faulty slots. */
+ unsigned long faulty_mask;
+
+ /** @slots: VMs currently bound to the AS slots. */
+ struct panthor_as_slot slots[MAX_AS_SLOTS];
+
+ /**
+ * @lru_list: List of least recently used VMs.
+ *
+ * We use this list to pick a VM to evict when all slots are
+ * used.
+ *
+ * There should be no more active VMs than there are AS slots,
+ * so this LRU is just here to keep VMs bound until there's
+ * a need to release a slot, thus avoiding unnecessary TLB/cache
+ * flushes.
+ */
+ struct list_head lru_list;
+ } as;
+
+ /** @vm: VMs management fields */
+ struct {
+ /** @lock: Lock protecting access to list. */
+ struct mutex lock;
+
+ /** @list: List containing all VMs. */
+ struct list_head list;
+
+ /** @reset_in_progress: True if a reset is in progress. */
+ bool reset_in_progress;
+
+ /** @wq: Workqueue used for the VM_BIND queues. */
+ struct workqueue_struct *wq;
+ } vm;
+};
+
+/**
+ * struct panthor_vm_pool - VM pool object
+ */
+struct panthor_vm_pool {
+ /** @xa: Array used for VM handle tracking. */
+ struct xarray xa;
+};
+
+/**
+ * struct panthor_vma - GPU mapping object
+ *
+ * This is used to track GEM mappings in GPU space.
+ */
+struct panthor_vma {
+ /** @base: Inherits from drm_gpuva. */
+ struct drm_gpuva base;
+
+ /** @node: Used to implement deferred release of VMAs. */
+ struct list_head node;
+
+ /**
+ * @flags: Combination of drm_panthor_vm_bind_op_flags.
+ *
+ * Only map related flags are accepted.
+ */
+ u32 flags;
+};
+
+/**
+ * struct panthor_vm_op_ctx - VM operation context
+ *
+ * With VM operations potentially taking place in a dma-signaling path, we
+ * need to make sure everything that might require resource allocation is
+ * pre-allocated upfront. This is what this operation context is for.
+ *
+ * We also collect resources that have been freed, so we can release them
+ * asynchronously, and let the VM_BIND scheduler process the next VM_BIND
+ * request.
+ */
+struct panthor_vm_op_ctx {
+ /** @rsvd_page_tables: Pages reserved for the MMU page table update. */
+ struct {
+ /** @count: Number of pages reserved. */
+ u32 count;
+
+ /** @ptr: Index of the first unused page in the @pages table. */
+ u32 ptr;
+
+ /**
+ * @pages: Array of pages that can be used for an MMU page table update.
+ *
+ * After a VM operation, there might be free pages left in this array.
+ * They should be returned to the pt_cache as part of the op_ctx cleanup.
+ */
+ void **pages;
+ } rsvd_page_tables;
+
+ /**
+ * @preallocated_vmas: Pre-allocated VMAs to handle the remap case.
+ *
+ * Partial unmap requests or map requests overlapping existing mappings will
+ * trigger a remap call, which needs to register up to three panthor_vma objects
+ * (one for the new mapping, and two for the previous and next mappings).
+ */
+ struct panthor_vma *preallocated_vmas[3];
+
+ /** @flags: Combination of drm_panthor_vm_bind_op_flags. */
+ u32 flags;
+
+ /** @va: Virtual range targeted by the VM operation. */
+ struct {
+ /** @addr: Start address. */
+ u64 addr;
+
+ /** @range: Range size. */
+ u64 range;
+ } va;
+
+ /**
+ * @returned_vmas: List of panthor_vma objects returned after a VM operation.
+ *
+ * For unmap operations, this will contain all VMAs that were covered by the
+ * specified VA range.
+ *
+ * For map operations, this will contain all VMAs that previously mapped to
+ * the specified VA range.
+ *
+ * Those VMAs, and the resources they point to will be released as part of
+ * the op_ctx cleanup operation.
+ */
+ struct list_head returned_vmas;
+
+ /** @map: Fields specific to a map operation. */
+ struct {
+ /** @vm_bo: Buffer object to map. */
+ struct drm_gpuvm_bo *vm_bo;
+
+ /** @bo_offset: Offset in the buffer object. */
+ u64 bo_offset;
+
+ /**
+ * @sgt: sg-table pointing to pages backing the GEM object.
+ *
+ * This is gathered at job creation time, such that we don't have
+ * to allocate in ::run_job().
+ */
+ struct sg_table *sgt;
+
+ /**
+ * @new_vma: The new VMA object that will be inserted to the VA tree.
+ */
+ struct panthor_vma *new_vma;
+ } map;
+};
+
+/**
+ * struct panthor_vm - VM object
+ *
+ * A VM is an object representing a GPU (or MCU) virtual address space.
+ * It embeds the MMU page table for this address space, a tree containing
+ * all the virtual mappings of GEM objects, and other things needed to manage
+ * the VM.
+ *
+ * Except for the MCU VM, which is managed by the kernel, all other VMs are
+ * created by userspace and mostly managed by userspace, using the
+ * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
+ *
+ * A portion of the virtual address space is reserved for kernel objects,
+ * like heap chunks, and userspace gets to decide how much of the virtual
+ * address space is left to the kernel (half of the virtual address space
+ * by default).
+ */
+struct panthor_vm {
+ /**
+ * @base: Inherit from drm_gpuvm.
+ *
+ * We delegate all the VA management to the common drm_gpuvm framework
+ * and only implement hooks to update the MMU page table.
+ */
+ struct drm_gpuvm base;
+
+ /**
+ * @sched: Scheduler used for asynchronous VM_BIND requests.
+ *
+ * We use a 1:1 scheduler here.
+ */
+ struct drm_gpu_scheduler sched;
+
+ /**
+ * @entity: Scheduling entity representing the VM_BIND queue.
+ *
+ * There's currently one bind queue per VM. It doesn't make sense to
+ * allow more given the VM operations are serialized anyway.
+ */
+ struct drm_sched_entity entity;
+
+ /** @ptdev: Device. */
+ struct panthor_device *ptdev;
+
+ /** @memattr: Value to program to the AS_MEMATTR register. */
+ u64 memattr;
+
+ /** @pgtbl_ops: Page table operations. */
+ struct io_pgtable_ops *pgtbl_ops;
+
+ /** @root_page_table: Stores the root page table pointer. */
+ void *root_page_table;
+
+ /**
+ * @op_lock: Lock used to serialize operations on a VM.
+ *
+ * The serialization of jobs queued to the VM_BIND queue is already
+ * taken care of by drm_sched, but we need to serialize synchronous
+ * and asynchronous VM_BIND requests. This is what this lock is for.
+ */
+ struct mutex op_lock;
+
+ /**
+ * @op_ctx: The context attached to the currently executing VM operation.
+ *
+ * NULL when no operation is in progress.
+ */
+ struct panthor_vm_op_ctx *op_ctx;
+
+ /**
+ * @mm: Memory management object representing the auto-VA/kernel-VA.
+ *
+ * Used to auto-allocate VA space for kernel-managed objects (tiler
+ * heaps, ...).
+ *
+ * For the MCU VM, this is managing the VA range that's used to map
+ * all shared interfaces.
+ *
+ * For user VMs, the range is specified by userspace, and must not
+ * exceed half of the addressable VA space.
+ */
+ struct drm_mm mm;
+
+ /** @mm_lock: Lock protecting the @mm field. */
+ struct mutex mm_lock;
+
+ /** @kernel_auto_va: Automatic VA-range for kernel BOs. */
+ struct {
+ /** @start: Start of the automatic VA-range for kernel BOs. */
+ u64 start;
+
+ /** @end: End of the automatic VA-range for kernel BOs. */
+ u64 end;
+ } kernel_auto_va;
+
+ /** @as: Address space related fields. */
+ struct {
+ /**
+ * @id: ID of the address space this VM is bound to.
+ *
+ * A value of -1 means the VM is inactive/not bound.
+ */
+ int id;
+
+ /** @active_cnt: Number of active users of this VM. */
+ refcount_t active_cnt;
+
+ /**
+ * @lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
+ *
+ * Active VMs should not be inserted in the LRU list.
+ */
+ struct list_head lru_node;
+ } as;
+
+ /**
+ * @heaps: Tiler heap related fields.
+ */
+ struct {
+ /**
+ * @pool: The heap pool attached to this VM.
+ *
+ * Will stay NULL until someone creates a heap context on this VM.
+ */
+ struct panthor_heap_pool *pool;
+
+ /** @lock: Lock used to protect access to @pool. */
+ struct mutex lock;
+ } heaps;
+
+ /** @node: Used to insert the VM in the panthor_mmu::vm::list. */
+ struct list_head node;
+
+ /** @for_mcu: True if this is the MCU VM. */
+ bool for_mcu;
+
+ /**
+ * @destroyed: True if the VM was destroyed.
+ *
+ * No further bind requests should be queued to a destroyed VM.
+ */
+ bool destroyed;
+
+ /**
+ * @unusable: True if the VM has turned unusable because something
+ * bad happened during an asynchronous request.
+ *
+ * We don't try to recover from such failures, because this implies
+ * informing userspace about the specific operation that failed, and
+ * hoping the userspace driver can replay things from there. This all
+ * sounds very complicated for little gain.
+ *
+ * Instead, we should just flag the VM as unusable, and fail any
+ * further request targeting this VM.
+ *
+ * We also provide a way to query a VM state, so userspace can destroy
+ * it and create a new one.
+ *
+ * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
+ * situation, where the logical device needs to be re-created.
+ */
+ bool unusable;
+
+ /**
+ * @unhandled_fault: Unhandled fault happened.
+ *
+ * This should be reported to the scheduler, and the queue/group be
+ * flagged as faulty as a result.
+ */
+ bool unhandled_fault;
+};
+
+/**
+ * struct panthor_vm_bind_job - VM bind job
+ */
+struct panthor_vm_bind_job {
+ /** @base: Inherit from drm_sched_job. */
+ struct drm_sched_job base;
+
+ /** @refcount: Reference count. */
+ struct kref refcount;
+
+ /** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
+ struct work_struct cleanup_op_ctx_work;
+
+ /** @vm: VM targeted by the VM operation. */
+ struct panthor_vm *vm;
+
+ /** @ctx: Operation context. */
+ struct panthor_vm_op_ctx ctx;
+};
+
+/**
+ * @pt_cache: Cache used to allocate MMU page tables.
+ *
+ * The pre-allocation pattern forces us to over-allocate to plan for
+ * the worst case scenario, and return the pages we didn't use.
+ *
+ * Having a kmem_cache allows us to speed up allocations.
+ */
+static struct kmem_cache *pt_cache;
+
+/**
+ * alloc_pt() - Custom page table allocator
+ * @cookie: Cookie passed at page table allocation time.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ * @gfp: GFP flags.
+ *
+ * We want a custom allocator so we can use a cache for page table
+ * allocations and amortize the cost of the over-reservation that's
+ * done to allow asynchronous VM operations.
+ *
+ * Return: non-NULL on success, NULL if the allocation failed for any
+ * reason.
+ */
+static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
+{
+ struct panthor_vm *vm = cookie;
+ void *page;
+
+ /* Allocation of the root page table happens during init. */
+ if (unlikely(!vm->root_page_table)) {
+ struct page *p;
+
+ drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
+ p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
+ gfp | __GFP_ZERO, get_order(size));
+ page = p ? page_address(p) : NULL;
+ vm->root_page_table = page;
+ return page;
+ }
+
+ /* We're not supposed to have anything bigger than 4k here, because we picked a
+ * 4k granule size at init time.
+ */
+ if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
+ return NULL;
+
+ /* We must have some op_ctx attached to the VM and it must have at least one
+ * free page.
+ */
+ if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
+ drm_WARN_ON(&vm->ptdev->base,
+ vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count))
+ return NULL;
+
+ page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++];
+ memset(page, 0, SZ_4K);
+
+ /* Page table entries don't use virtual addresses, which trips up
+ * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
+ * are mixed with other fields, and I fear kmemleak won't detect that
+ * either.
+ *
+ * Let's just ignore memory passed to the page-table driver for now.
+ */
+ kmemleak_ignore(page);
+ return page;
+}
+
+/**
+ * free_pt() - Custom page table free function
+ * @cookie: Cookie passed at page table allocation time.
+ * @data: Page table to free.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ */
+static void free_pt(void *cookie, void *data, size_t size)
+{
+ struct panthor_vm *vm = cookie;
+
+ if (unlikely(vm->root_page_table == data)) {
+ free_pages((unsigned long)data, get_order(size));
+ vm->root_page_table = NULL;
+ return;
+ }
+
+ if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
+ return;
+
+ /* Return the page to the pt_cache. */
+ kmem_cache_free(pt_cache, data);
+}
+
+static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
+{
+ int ret;
+ u32 val;
+
+ /* Wait for the MMU status to indicate there is no active command, in
+ * case one is pending.
+ */
+ ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
+ val, !(val & AS_STATUS_AS_ACTIVE),
+ 10, 100000);
+
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n");
+ }
+
+ return ret;
+}
+
+static int write_cmd(struct panthor_device *ptdev, u32 as_nr, u32 cmd)
+{
+ int status;
+
+ /* write AS_COMMAND when MMU is ready to accept another command */
+ status = wait_ready(ptdev, as_nr);
+ if (!status)
+ gpu_write(ptdev, AS_COMMAND(as_nr), cmd);
+
+ return status;
+}
+
+static void lock_region(struct panthor_device *ptdev, u32 as_nr,
+ u64 region_start, u64 size)
+{
+ u8 region_width;
+ u64 region;
+ u64 region_end = region_start + size;
+
+ if (!size)
+ return;
+
+ /*
+ * The locked region is a naturally aligned power-of-2 block encoded as
+ * its log2 size minus 1.
+ * Calculate the desired start/end and look for the highest bit which
+ * differs. The smallest naturally aligned block must include this bit
+ * change, the desired region starts with this bit (and subsequent bits)
+ * zeroed and ends with the bit (and subsequent bits) set to one.
+ */
+ region_width = max(fls64(region_start ^ (region_end - 1)),
+ const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
+
+ /*
+ * Mask off the low bits of region_start (which would be ignored by
+ * the hardware anyway)
+ */
+ region_start &= GENMASK_ULL(63, region_width);
+
+ region = region_width | region_start;
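+
+ /*
+ * e.g. locking 1M at 0x200000: region_width =
+ * fls64(0x200000 ^ 0x2fffff) - 1 = 19 (a 2^20 block), so region =
+ * 0x200000 | 19 = 0x200013, assuming 2^20 is at least the hardware
+ * minimum lock size.
+ */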
+
+ /* Lock the region that needs to be updated */
+ gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
+ gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
+ write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
+}
+
+static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
+ u64 iova, u64 size, u32 op)
+{
+ lockdep_assert_held(&ptdev->mmu->as.slots_lock);
+
+ if (as_nr < 0)
+ return 0;
+
+ if (op != AS_COMMAND_UNLOCK)
+ lock_region(ptdev, as_nr, iova, size);
+
+ /* Run the MMU operation */
+ write_cmd(ptdev, as_nr, op);
+
+ /* Wait for the flush to complete */
+ return wait_ready(ptdev, as_nr);
+}
+
+static int mmu_hw_do_operation(struct panthor_vm *vm,
+ u64 iova, u64 size, u32 op)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ int ret;
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op);
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ return ret;
+}
+
+static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
+ u64 transtab, u64 transcfg, u64 memattr)
+{
+ int ret;
+
+ ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
+ if (ret)
+ return ret;
+
+ gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
+ gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
+
+ gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
+ gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
+
+ gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
+ gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+
+ return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
+}
+
+static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
+{
+ int ret;
+
+ ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
+ if (ret)
+ return ret;
+
+ gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
+ gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
+
+ gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
+ gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
+
+ gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
+ gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
+
+ return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
+}
+
+static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value)
+{
+ /* Bits 16 to 31 mean REQ_COMPLETE. */
+ return value & GENMASK(15, 0);
+}
+
+static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
+{
+ return BIT(as);
+}
+
+/**
+ * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
+ * @vm: VM to check.
+ *
+ * Return: true if the VM has unhandled faults, false otherwise.
+ */
+bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm)
+{
+ return vm->unhandled_fault;
+}
+
+/**
+ * panthor_vm_is_unusable() - Check if the VM is still usable
+ * @vm: VM to check.
+ *
+ * Return: true if the VM is unusable, false otherwise.
+ */
+bool panthor_vm_is_unusable(struct panthor_vm *vm)
+{
+ return vm->unusable;
+}
+
+static void panthor_vm_release_as_locked(struct panthor_vm *vm)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+
+ lockdep_assert_held(&ptdev->mmu->as.slots_lock);
+
+ if (drm_WARN_ON(&ptdev->base, vm->as.id < 0))
+ return;
+
+ ptdev->mmu->as.slots[vm->as.id].vm = NULL;
+ clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
+ refcount_set(&vm->as.active_cnt, 0);
+ list_del_init(&vm->as.lru_node);
+ vm->as.id = -1;
+}
+
+/**
+ * panthor_vm_active() - Flag a VM as active
+ * @vm: VM to flag as active.
+ *
+ * Assigns an address space to a VM so it can be used by the GPU/MCU.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_active(struct panthor_vm *vm)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
+ struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
+ int ret = 0, as, cookie;
+ u64 transtab, transcfg;
+
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return -ENODEV;
+
+ if (refcount_inc_not_zero(&vm->as.active_cnt))
+ goto out_dev_exit;
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+
+ if (refcount_inc_not_zero(&vm->as.active_cnt))
+ goto out_unlock;
+
+ as = vm->as.id;
+ if (as >= 0) {
+ /* Unhandled pagefault on this AS, the MMU was disabled. We need to
+ * re-enable the MMU after clearing+unmasking the AS interrupts.
+ */
+ if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
+ goto out_enable_as;
+
+ goto out_make_active;
+ }
+
+ /* Check for a free AS */
+ if (vm->for_mcu) {
+ drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
+ as = 0;
+ } else {
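+ /* ffz() picks the first zero bit; OR-ing in BIT(0) keeps AS0
+ * reserved for the MCU VM, e.g. alloc_mask = 0b1010 yields AS #2.
+ */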
+ as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
+ }
+
+ if (!(BIT(as) & ptdev->gpu_info.as_present)) {
+ struct panthor_vm *lru_vm;
+
+ lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
+ struct panthor_vm,
+ as.lru_node);
+ if (drm_WARN_ON(&ptdev->base, !lru_vm)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt));
+ as = lru_vm->as.id;
+ panthor_vm_release_as_locked(lru_vm);
+ }
+
+ /* Assign the free or reclaimed AS to the FD */
+ vm->as.id = as;
+ set_bit(as, &ptdev->mmu->as.alloc_mask);
+ ptdev->mmu->as.slots[as].vm = vm;
+
+out_enable_as:
+ transtab = cfg->arm_lpae_s1_cfg.ttbr;
+ transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
+ AS_TRANSCFG_PTW_RA |
+ AS_TRANSCFG_ADRMODE_AARCH64_4K |
+ AS_TRANSCFG_INA_BITS(55 - va_bits);
+ if (ptdev->coherent)
+ transcfg |= AS_TRANSCFG_PTW_SH_OS;
+
+ /* If the VM is re-activated, we clear the fault. */
+ vm->unhandled_fault = false;
+
+ /* Unhandled pagefault on this AS, clear the fault and re-enable interrupts
+ * before enabling the AS.
+ */
+ if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
+ gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
+ ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
+ gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
+ }
+
+ ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr);
+
+out_make_active:
+ if (!ret) {
+ refcount_set(&vm->as.active_cnt, 1);
+ list_del_init(&vm->as.lru_node);
+ }
+
+out_unlock:
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+/**
+ * panthor_vm_idle() - Flag a VM idle
+ * @vm: VM to flag as idle.
+ *
+ * When we know the GPU is done with the VM (no more jobs to process),
+ * we can relinquish the AS slot attached to this VM, if any.
+ *
+ * We don't release the slot immediately, but instead place the VM in
+ * the LRU list, so it can be evicted if another VM needs an AS slot.
+ * This way, VMs stay attached to the AS they were given until we run
+ * out of free slots, limiting the number of MMU operations (TLB flush
+ * and other AS updates).
+ */
+void panthor_vm_idle(struct panthor_vm *vm)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+
+ if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
+ return;
+
+ if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node)))
+ list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);
+
+ refcount_set(&vm->as.active_cnt, 0);
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+}
+
+static void panthor_vm_stop(struct panthor_vm *vm)
+{
+ drm_sched_stop(&vm->sched, NULL);
+}
+
+static void panthor_vm_start(struct panthor_vm *vm)
+{
+ drm_sched_start(&vm->sched, true);
+}
+
+/**
+ * panthor_vm_as() - Get the AS slot attached to a VM
+ * @vm: VM to get the AS slot of.
+ *
+ * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
+ */
+int panthor_vm_as(struct panthor_vm *vm)
+{
+ return vm->as.id;
+}
+
+static size_t get_pgsize(u64 addr, size_t size, size_t *count)
+{
+ /*
+ * io-pgtable only operates on multiple pages within a single table
+ * entry, so we need to split at boundaries of the table size, i.e.
+ * the next block size up. The distance from address A to the next
+ * boundary of block size B is logically B - A % B, but in unsigned
+ * two's complement where B is a power of two we get the equivalence
+ * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
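+ *
+ * For example, addr = 0x201000 and size = 0x400000 give blk_offset =
+ * 0x1ff000, so we return SZ_4K with *count = 0x1ff; once the cursor
+ * reaches the 2M boundary, the next call returns SZ_2M with *count = 1.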
+ */
+ size_t blk_offset = -addr % SZ_2M;
+
+ if (blk_offset || size < SZ_2M) {
+ *count = min_not_zero(blk_offset, size) / SZ_4K;
+ return SZ_4K;
+ }
+ blk_offset = -addr % SZ_1G ?: SZ_1G;
+ *count = min(blk_offset, size) / SZ_2M;
+ return SZ_2M;
+}
+
+static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ int ret = 0, cookie;
+
+ if (vm->as.id < 0)
+ return 0;
+
+ /* If the device is unplugged, we just silently skip the flush. */
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return 0;
+
+ /* Flush the PTs only if we're already awake */
+ if (pm_runtime_active(ptdev->base.dev))
+ ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ struct io_pgtable_ops *ops = vm->pgtbl_ops;
+ u64 offset = 0;
+
+ drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size);
+
+ while (offset < size) {
+ size_t unmapped_sz = 0, pgcount;
+ size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);
+
+ unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);
+
+ if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) {
+ drm_err(&ptdev->base, "failed to unmap range %llx-%llx (requested range %llx-%llx)\n",
+ iova + offset + unmapped_sz,
+ iova + offset + pgsize * pgcount,
+ iova, iova + size);
+ panthor_vm_flush_range(vm, iova, offset + unmapped_sz);
+ return -EINVAL;
+ }
+ offset += unmapped_sz;
+ }
+
+ return panthor_vm_flush_range(vm, iova, size);
+}
+
+static int
+panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
+ struct sg_table *sgt, u64 offset, u64 size)
+{
+ struct panthor_device *ptdev = vm->ptdev;
+ unsigned int count;
+ struct scatterlist *sgl;
+ struct io_pgtable_ops *ops = vm->pgtbl_ops;
+ u64 start_iova = iova;
+ int ret;
+
+ if (!size)
+ return 0;
+
+ for_each_sgtable_dma_sg(sgt, sgl, count) {
+ dma_addr_t paddr = sg_dma_address(sgl);
+ size_t len = sg_dma_len(sgl);
+
+ if (len <= offset) {
+ offset -= len;
+ continue;
+ }
+
+ paddr += offset;
+ len -= offset;
+ len = min_t(size_t, len, size);
+ size -= len;
+
+ drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
+ vm->as.id, iova, &paddr, len);
+
+ while (len) {
+ size_t pgcount, mapped = 0;
+ size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
+
+ ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
+ GFP_KERNEL, &mapped);
+ iova += mapped;
+ paddr += mapped;
+ len -= mapped;
+
+ if (drm_WARN_ON(&ptdev->base, !ret && !mapped))
+ ret = -ENOMEM;
+
+ if (ret) {
+ /* If something failed, unmap what we've already mapped before
+ * returning. The unmap call is not supposed to fail.
+ */
+ drm_WARN_ON(&ptdev->base,
+ panthor_vm_unmap_pages(vm, start_iova,
+ iova - start_iova));
+ return ret;
+ }
+ }
+
+ if (!size)
+ break;
+ }
+
+ return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
+}
+
+static int flags_to_prot(u32 flags)
+{
+ int prot = 0;
+
+ if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC)
+ prot |= IOMMU_NOEXEC;
+
+ if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED))
+ prot |= IOMMU_CACHE;
+
+ if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_READONLY)
+ prot |= IOMMU_READ;
+ else
+ prot |= IOMMU_READ | IOMMU_WRITE;
+
+ return prot;
+}
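+
+/*
+ * e.g. DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC
+ * (cached) translates to IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC.
+ */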
+
+/**
+ * panthor_vm_alloc_va() - Allocate a region in the auto-va space
+ * @VM: VM to allocate a region on.
+ * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
+ * wants the VA to be automatically allocated from the auto-VA range.
+ * @size: size of the VA range.
+ * @va_node: drm_mm_node to initialize. Must be zero-initialized.
+ *
+ * Some GPU objects, like heap chunks, are fully managed by the kernel and
+ * need to be mapped to the userspace VM, in the region reserved for kernel
+ * objects.
+ *
+ * This function takes care of allocating a region in the kernel auto-VA space.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int
+panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
+ struct drm_mm_node *va_node)
+{
+ int ret;
+
+ if (!size || (size & ~PAGE_MASK))
+ return -EINVAL;
+
+ if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
+ return -EINVAL;
+
+ mutex_lock(&vm->mm_lock);
+ if (va != PANTHOR_VM_KERNEL_AUTO_VA) {
+ va_node->start = va;
+ va_node->size = size;
+ ret = drm_mm_reserve_node(&vm->mm, va_node);
+ } else {
+ ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size,
+ size >= SZ_2M ? SZ_2M : SZ_4K,
+ 0, vm->kernel_auto_va.start,
+ vm->kernel_auto_va.end,
+ DRM_MM_INSERT_BEST);
+ }
+ mutex_unlock(&vm->mm_lock);
+
+ return ret;
+}
+
+/**
+ * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
+ * @vm: VM to free the region on.
+ * @va_node: Memory node representing the region to free.
+ */
+void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
+{
+ mutex_lock(&vm->mm_lock);
+ drm_mm_remove_node(va_node);
+ mutex_unlock(&vm->mm_lock);
+}
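+
+/*
+ * Sketch of a typical caller (the kernel BO mapping code elsewhere in
+ * this series follows this shape):
+ *
+ *	struct drm_mm_node va_node = {};
+ *	int ret = panthor_vm_alloc_va(vm, PANTHOR_VM_KERNEL_AUTO_VA,
+ *				      SZ_2M, &va_node);
+ *
+ *	(map the object at va_node.start, use it, unmap it)
+ *
+ *	panthor_vm_free_va(vm, &va_node);
+ */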
+
+static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
+ struct drm_gpuvm *vm = vm_bo->vm;
+ bool unpin;
+
+ /* We must retain the GEM before calling drm_gpuvm_bo_put(),
+ * otherwise the mutex might be destroyed while we hold it.
+ * Same goes for the VM, since we take the VM resv lock.
+ */
+ drm_gem_object_get(&bo->base.base);
+ drm_gpuvm_get(vm);
+
+ /* We take the resv lock to protect against concurrent accesses to the
+ * gpuvm evicted/extobj lists that are modified in
+ * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
+ * releases the last vm_bo reference.
+ * We take the BO GPUVA list lock to protect the vm_bo removal from the
+ * GEM vm_bo list.
+ */
+ dma_resv_lock(drm_gpuvm_resv(vm), NULL);
+ mutex_lock(&bo->gpuva_list_lock);
+ unpin = drm_gpuvm_bo_put(vm_bo);
+ mutex_unlock(&bo->gpuva_list_lock);
+ dma_resv_unlock(drm_gpuvm_resv(vm));
+
+ /* If the vm_bo object was destroyed, release the pin reference that
+ * was held by this object.
+ */
+ if (unpin && !bo->base.base.import_attach)
+ drm_gem_shmem_unpin(&bo->base);
+
+ drm_gpuvm_put(vm);
+ drm_gem_object_put(&bo->base.base);
+}
+
+static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
+ struct panthor_vm *vm)
+{
+ struct panthor_vma *vma, *tmp_vma;
+
+ u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
+ op_ctx->rsvd_page_tables.ptr;
+
+ if (remaining_pt_count) {
+ kmem_cache_free_bulk(pt_cache, remaining_pt_count,
+ op_ctx->rsvd_page_tables.pages +
+ op_ctx->rsvd_page_tables.ptr);
+ }
+
+ kfree(op_ctx->rsvd_page_tables.pages);
+
+ if (op_ctx->map.vm_bo)
+ panthor_vm_bo_put(op_ctx->map.vm_bo);
+
+ for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
+ kfree(op_ctx->preallocated_vmas[i]);
+
+ list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
+ list_del(&vma->node);
+ panthor_vm_bo_put(vma->base.vm_bo);
+ kfree(vma);
+ }
+}
+
+static struct panthor_vma *
+panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
+{
+ for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
+ struct panthor_vma *vma = op_ctx->preallocated_vmas[i];
+
+ if (vma) {
+ op_ctx->preallocated_vmas[i] = NULL;
+ return vma;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
+{
+ u32 vma_count;
+
+ switch (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ /* One VMA for the new mapping, and two more VMAs for the remap case
+ * which might contain both a prev and next VA.
+ */
+ vma_count = 3;
+ break;
+
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
+ /* Partial unmaps might trigger a remap with either a prev or a next VA,
+ * but not both.
+ */
+ vma_count = 1;
+ break;
+
+ default:
+ return 0;
+ }
+
+ for (u32 i = 0; i < vma_count; i++) {
+ struct panthor_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+
+ if (!vma)
+ return -ENOMEM;
+
+ op_ctx->preallocated_vmas[i] = vma;
+ }
+
+ return 0;
+}
+
+#define PANTHOR_VM_BIND_OP_MAP_FLAGS \
+ (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \
+ DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
+
+static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
+ struct panthor_vm *vm,
+ struct panthor_gem_object *bo,
+ u64 offset,
+ u64 size, u64 va,
+ u32 flags)
+{
+ struct drm_gpuvm_bo *preallocated_vm_bo;
+ struct sg_table *sgt = NULL;
+ u64 pt_count;
+ int ret;
+
+ if (!bo)
+ return -EINVAL;
+
+ if ((flags & ~PANTHOR_VM_BIND_OP_MAP_FLAGS) ||
+ (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
+ return -EINVAL;
+
+ /* Make sure the VA and size are aligned and in-bounds. */
+ if (size > bo->base.base.size || offset > bo->base.base.size - size)
+ return -EINVAL;
+
+ /* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */
+ if (bo->exclusive_vm_root_gem &&
+ bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm))
+ return -EINVAL;
+
+ memset(op_ctx, 0, sizeof(*op_ctx));
+ INIT_LIST_HEAD(&op_ctx->returned_vmas);
+ op_ctx->flags = flags;
+ op_ctx->va.range = size;
+ op_ctx->va.addr = va;
+
+ ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
+ if (ret)
+ goto err_cleanup;
+
+ if (!bo->base.base.import_attach) {
+ /* Pre-reserve the BO pages, so the map operation doesn't have to
+ * allocate.
+ */
+ ret = drm_gem_shmem_pin(&bo->base);
+ if (ret)
+ goto err_cleanup;
+ }
+
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(sgt)) {
+ if (!bo->base.base.import_attach)
+ drm_gem_shmem_unpin(&bo->base);
+
+ ret = PTR_ERR(sgt);
+ goto err_cleanup;
+ }
+
+ op_ctx->map.sgt = sgt;
+
+ preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
+ if (!preallocated_vm_bo) {
+ if (!bo->base.base.import_attach)
+ drm_gem_shmem_unpin(&bo->base);
+
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+
+ mutex_lock(&bo->gpuva_list_lock);
+ op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
+ mutex_unlock(&bo->gpuva_list_lock);
+
+ /* If a vm_bo for this <VM,BO> combination exists, it already
+ * retains a pin ref, and we can release the one we took earlier.
+ *
+ * If our pre-allocated vm_bo is picked, it now retains the pin ref,
+ * which will be released in panthor_vm_bo_put().
+ */
+ if (preallocated_vm_bo != op_ctx->map.vm_bo &&
+ !bo->base.base.import_attach)
+ drm_gem_shmem_unpin(&bo->base);
+
+ op_ctx->map.bo_offset = offset;
+
+ /* L1, L2 and L3 page tables.
+ * We could optimize L3 allocation by iterating over the sgt and merging
+ * 2M contiguous blocks, but it's simpler to over-provision and return
+ * the pages if they're not used.
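+ *
+ * e.g. va = 0 and size = 4M span one 512G (L1) region, one 1G (L2)
+ * region and two 2M (L3) regions: pt_count = 1 + 1 + 2 = 4 pages.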
+ */
+ pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) +
+ ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) +
+ ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);
+
+ op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
+ sizeof(*op_ctx->rsvd_page_tables.pages),
+ GFP_KERNEL);
+ if (!op_ctx->rsvd_page_tables.pages) {
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+
+ ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
+ op_ctx->rsvd_page_tables.pages);
+ op_ctx->rsvd_page_tables.count = ret;
+ if (ret != pt_count) {
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+
+ /* Insert BO into the extobj list last, when we know nothing can fail. */
+ dma_resv_lock(panthor_vm_resv(vm), NULL);
+ drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo);
+ dma_resv_unlock(panthor_vm_resv(vm));
+
+ return 0;
+
+err_cleanup:
+ panthor_vm_cleanup_op_ctx(op_ctx, vm);
+ return ret;
+}
+
+static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
+ struct panthor_vm *vm,
+ u64 va, u64 size)
+{
+ u32 pt_count = 0;
+ int ret;
+
+ memset(op_ctx, 0, sizeof(*op_ctx));
+ INIT_LIST_HEAD(&op_ctx->returned_vmas);
+ op_ctx->va.range = size;
+ op_ctx->va.addr = va;
+ op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
+
+ /* Pre-allocate L3 page tables to account for the split-2M-block
+ * situation on unmap.
+ */
+ if (va != ALIGN(va, SZ_2M))
+ pt_count++;
+
+ if (va + size != ALIGN(va + size, SZ_2M) &&
+ ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
+ pt_count++;
+
+ ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
+ if (ret)
+ goto err_cleanup;
+
+ if (pt_count) {
+ op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
+ sizeof(*op_ctx->rsvd_page_tables.pages),
+ GFP_KERNEL);
+ if (!op_ctx->rsvd_page_tables.pages) {
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+
+ ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
+ op_ctx->rsvd_page_tables.pages);
+ if (ret != pt_count) {
+ ret = -ENOMEM;
+ goto err_cleanup;
+ }
+ op_ctx->rsvd_page_tables.count = pt_count;
+ }
+
+ return 0;
+
+err_cleanup:
+ panthor_vm_cleanup_op_ctx(op_ctx, vm);
+ return ret;
+}
+
+static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx,
+ struct panthor_vm *vm)
+{
+ memset(op_ctx, 0, sizeof(*op_ctx));
+ INIT_LIST_HEAD(&op_ctx->returned_vmas);
+ op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
+}
+
+/**
+ * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address
+ * @vm: VM to look into.
+ * @va: Virtual address to search for.
+ * @bo_offset: Offset of the GEM object mapped at this virtual address.
+ * Only valid on success.
+ *
+ * The object returned by this function might no longer be mapped when the
+ * function returns. It's the caller's responsibility to ensure there are no
+ * concurrent map/unmap operations making the returned value invalid, or
+ * make sure it doesn't matter if the object is no longer mapped.
+ *
+ * Return: A valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct panthor_gem_object *
+panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset)
+{
+ struct panthor_gem_object *bo = ERR_PTR(-ENOENT);
+ struct drm_gpuva *gpuva;
+ struct panthor_vma *vma;
+
+ /* Take the VM lock to prevent concurrent map/unmap operations. */
+ mutex_lock(&vm->op_lock);
+ gpuva = drm_gpuva_find_first(&vm->base, va, 1);
+ vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL;
+ if (vma && vma->base.gem.obj) {
+ drm_gem_object_get(vma->base.gem.obj);
+ bo = to_panthor_bo(vma->base.gem.obj);
+ *bo_offset = vma->base.gem.offset + (va - vma->base.va.addr);
+ }
+ mutex_unlock(&vm->op_lock);
+
+ return bo;
+}
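+
+/* Hypothetical usage sketch (illustrative; names like fault_addr are
+ * placeholders, error handling elided):
+ *
+ * u64 bo_offset;
+ * struct panthor_gem_object *bo =
+ * panthor_vm_get_bo_for_va(vm, fault_addr, &bo_offset);
+ *
+ * if (!IS_ERR(bo)) {
+ * ... inspect bo and bo_offset ...
+ * drm_gem_object_put(&bo->base.base);
+ * }
+ */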
+
+#define PANTHOR_VM_MIN_KERNEL_VA_SIZE SZ_256M
+
+static u64
+panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args,
+ u64 full_va_range)
+{
+ u64 user_va_range;
+
+ /* Make sure we have a minimum amount of VA space for kernel objects. */
+ if (full_va_range < PANTHOR_VM_MIN_KERNEL_VA_SIZE)
+ return 0;
+
+ if (args->user_va_range) {
+ /* Use the user provided value if != 0. */
+ user_va_range = args->user_va_range;
+ } else if (TASK_SIZE_OF(current) < full_va_range) {
+ /* If the task VM size is smaller than the GPU VA range, pick this
+ * as our default user VA range, so userspace can CPU/GPU map buffers
+ * at the same address.
+ */
+ user_va_range = TASK_SIZE_OF(current);
+ } else {
+ /* If the GPU VA range is smaller than the task VM size, we
+ * just have to live with the fact we won't be able to map
+ * all buffers at the same GPU/CPU address.
+ *
+ * If the GPU VA range is bigger than 4G (more than 32-bit of
+ * VA), we split the range in two, and assign half of it to
+ * the user and the other half to the kernel, if it's not, we
+ * keep the kernel VA space as small as possible.
+ */
+ user_va_range = full_va_range > SZ_4G ?
+ full_va_range / 2 :
+ full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
+ }
+
+ if (full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE < user_va_range)
+ user_va_range = full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
+
+ return user_va_range;
+}
+
+#define PANTHOR_VM_CREATE_FLAGS 0
+
+static int
+panthor_vm_create_check_args(const struct panthor_device *ptdev,
+ const struct drm_panthor_vm_create *args,
+ u64 *kernel_va_start, u64 *kernel_va_range)
+{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
+ u64 full_va_range = 1ull << va_bits;
+ u64 user_va_range;
+
+ if (args->flags & ~PANTHOR_VM_CREATE_FLAGS)
+ return -EINVAL;
+
+ user_va_range = panthor_vm_create_get_user_va_range(args, full_va_range);
+ if (!user_va_range || (args->user_va_range && args->user_va_range > user_va_range))
+ return -EINVAL;
+
+ /* Pick a kernel VA range that's a power of two, to have a clear split. */
+ *kernel_va_range = rounddown_pow_of_two(full_va_range - user_va_range);
+ *kernel_va_start = full_va_range - *kernel_va_range;
+ return 0;
+}
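+
+/* Worked example (illustrative): with va_bits == 33 the full VA range is
+ * 8G. Assuming the user passed no explicit range and the task VM size is
+ * larger than 8G, panthor_vm_create_get_user_va_range() picks 4G for the
+ * user, and the kernel gets rounddown_pow_of_two(8G - 4G) == 4G starting
+ * at 0x100000000.
+ */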
+
+/*
+ * Only 32 VMs per open file. If that becomes a limiting factor, we can
+ * increase this number.
+ */
+#define PANTHOR_MAX_VMS_PER_FILE 32
+
+/**
+ * panthor_vm_pool_create_vm() - Create a VM
+ * @ptdev: The panthor device.
+ * @pool: The VM pool to create this VM in.
+ * @args: The VM creation arguments.
+ *
+ * Return: a positive VM ID on success, a negative error code otherwise.
+ */
+int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
+ struct panthor_vm_pool *pool,
+ struct drm_panthor_vm_create *args)
+{
+ u64 kernel_va_start, kernel_va_range;
+ struct panthor_vm *vm;
+ int ret;
+ u32 id;
+
+ ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range);
+ if (ret)
+ return ret;
+
+ vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range,
+ kernel_va_start, kernel_va_range);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+
+ ret = xa_alloc(&pool->xa, &id, vm,
+ XA_LIMIT(1, PANTHOR_MAX_VMS_PER_FILE), GFP_KERNEL);
+
+ if (ret) {
+ panthor_vm_put(vm);
+ return ret;
+ }
+
+ args->user_va_range = kernel_va_start;
+ return id;
+}
+
+static void panthor_vm_destroy(struct panthor_vm *vm)
+{
+ if (!vm)
+ return;
+
+ vm->destroyed = true;
+
+ mutex_lock(&vm->heaps.lock);
+ panthor_heap_pool_destroy(vm->heaps.pool);
+ vm->heaps.pool = NULL;
+ mutex_unlock(&vm->heaps.lock);
+
+ drm_WARN_ON(&vm->ptdev->base,
+ panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range));
+ panthor_vm_put(vm);
+}
+
+/**
+ * panthor_vm_pool_destroy_vm() - Destroy a VM.
+ * @pool: VM pool.
+ * @handle: VM handle.
+ *
+ * This function doesn't free the VM object or its resources; it just kills
+ * all mappings, and makes sure nothing can be mapped after that point.
+ *
+ * If there were any active jobs at the time this function was called, these
+ * jobs should experience page faults and be killed as a result.
+ *
+ * The VM resources are freed when the last reference on the VM object is
+ * dropped.
+ */
+int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
+{
+ struct panthor_vm *vm;
+
+ vm = xa_erase(&pool->xa, handle);
+
+ panthor_vm_destroy(vm);
+
+ return vm ? 0 : -EINVAL;
+}
+
+/**
+ * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
+ * @pool: VM pool to check.
+ * @handle: Handle of the VM to retrieve.
+ *
+ * Return: A valid pointer if the VM exists, NULL otherwise.
+ */
+struct panthor_vm *
+panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
+{
+ struct panthor_vm *vm;
+
+ vm = panthor_vm_get(xa_load(&pool->xa, handle));
+
+ return vm;
+}
+
+/**
+ * panthor_vm_pool_destroy() - Destroy a VM pool.
+ * @pfile: File.
+ *
+ * Destroy all VMs in the pool, and release the pool resources.
+ *
+ * Note that VMs can outlive the pool they were created from if other
+ * objects hold a reference to these VMs.
+ */
+void panthor_vm_pool_destroy(struct panthor_file *pfile)
+{
+ struct panthor_vm *vm;
+ unsigned long i;
+
+ if (!pfile->vms)
+ return;
+
+ xa_for_each(&pfile->vms->xa, i, vm)
+ panthor_vm_destroy(vm);
+
+ xa_destroy(&pfile->vms->xa);
+ kfree(pfile->vms);
+}
+
+/**
+ * panthor_vm_pool_create() - Create a VM pool
+ * @pfile: File.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_pool_create(struct panthor_file *pfile)
+{
+ pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL);
+ if (!pfile->vms)
+ return -ENOMEM;
+
+ xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1);
+ return 0;
+}
+
+/* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */
+static void mmu_tlb_flush_all(void *cookie)
+{
+}
+
+static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
+{
+}
+
+static const struct iommu_flush_ops mmu_tlb_ops = {
+ .tlb_flush_all = mmu_tlb_flush_all,
+ .tlb_flush_walk = mmu_tlb_flush_walk,
+};
+
+static const char *access_type_name(struct panthor_device *ptdev,
+ u32 fault_status)
+{
+ switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+ case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+ return "ATOMIC";
+ case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+ return "READ";
+ case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+ return "WRITE";
+ case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+ return "EXECUTE";
+ default:
+ drm_WARN_ON(&ptdev->base, 1);
+ return NULL;
+ }
+}
+
+static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
+{
+ bool has_unhandled_faults = false;
+
+ status = panthor_mmu_fault_mask(ptdev, status);
+ while (status) {
+ u32 as = ffs(status | (status >> 16)) - 1;
+ u32 mask = panthor_mmu_as_fault_mask(ptdev, as);
+ u32 new_int_mask;
+ u64 addr;
+ u32 fault_status;
+ u32 exception_type;
+ u32 access_type;
+ u32 source_id;
+
+ fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
+ addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
+ addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
+
+ /* decode the fault status */
+ exception_type = fault_status & 0xFF;
+ access_type = (fault_status >> 8) & 0x3;
+ source_id = (fault_status >> 16);
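+ /* e.g. (illustrative) fault_status == 0x0300c1 decodes to
+ * exception_type 0xc1, access_type 0x0 (ATOMIC) and source_id 0x3.
+ */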
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+
+ ptdev->mmu->as.faulty_mask |= mask;
+ new_int_mask =
+ panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask);
+
+ /* terminal fault, print info about the fault */
+ drm_err(&ptdev->base,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n",
+ as, addr,
+ fault_status,
+ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, panthor_exception_name(ptdev, exception_type),
+ access_type, access_type_name(ptdev, fault_status),
+ source_id);
+
+ /* Ignore MMU interrupts on this AS until it's been
+ * re-enabled.
+ */
+ ptdev->mmu->irq.mask = new_int_mask;
+ gpu_write(ptdev, MMU_INT_MASK, new_int_mask);
+
+ if (ptdev->mmu->as.slots[as].vm)
+ ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
+
+ /* Disable the MMU to kill jobs on this AS. */
+ panthor_mmu_as_disable(ptdev, as);
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ status &= ~mask;
+ has_unhandled_faults = true;
+ }
+
+ if (has_unhandled_faults)
+ panthor_sched_report_mmu_fault(ptdev);
+}
+PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
+
+/**
+ * panthor_mmu_suspend() - Suspend the MMU logic
+ * @ptdev: Device.
+ *
+ * All we do here is de-assign the AS slots on all active VMs, so things
+ * get flushed to main memory, and no further access to these VMs is
+ * possible.
+ *
+ * We also suspend the MMU IRQ.
+ */
+void panthor_mmu_suspend(struct panthor_device *ptdev)
+{
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
+ struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
+
+ if (vm) {
+ drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
+ panthor_vm_release_as_locked(vm);
+ }
+ }
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ panthor_mmu_irq_suspend(&ptdev->mmu->irq);
+}
+
+/**
+ * panthor_mmu_resume() - Resume the MMU logic
+ * @ptdev: Device.
+ *
+ * Resume the IRQ.
+ *
+ * We don't re-enable previously active VMs. We assume other parts of the
+ * driver will call panthor_vm_active() on the VMs they intend to use.
+ */
+void panthor_mmu_resume(struct panthor_device *ptdev)
+{
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ ptdev->mmu->as.alloc_mask = 0;
+ ptdev->mmu->as.faulty_mask = 0;
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
+}
+
+/**
+ * panthor_mmu_pre_reset() - Prepare for a reset
+ * @ptdev: Device.
+ *
+ * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we
+ * don't get asked to do a VM operation while the GPU is down.
+ *
+ * We don't cleanly shut down the AS slots here, because the reset might
+ * come from an AS_ACTIVE_BIT stuck situation.
+ */
+void panthor_mmu_pre_reset(struct panthor_device *ptdev)
+{
+ struct panthor_vm *vm;
+
+ panthor_mmu_irq_suspend(&ptdev->mmu->irq);
+
+ mutex_lock(&ptdev->mmu->vm.lock);
+ ptdev->mmu->vm.reset_in_progress = true;
+ list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
+ panthor_vm_stop(vm);
+ mutex_unlock(&ptdev->mmu->vm.lock);
+}
+
+/**
+ * panthor_mmu_post_reset() - Restore things after a reset
+ * @ptdev: Device.
+ *
+ * Put the MMU logic back in action after a reset. That implies resuming the
+ * IRQ and re-enabling the VM_BIND queues.
+ */
+void panthor_mmu_post_reset(struct panthor_device *ptdev)
+{
+ struct panthor_vm *vm;
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+
+ /* Now that the reset is effective, we can assume that none of the
+ * AS slots are set up, and clear the faulty flags too.
+ */
+ ptdev->mmu->as.alloc_mask = 0;
+ ptdev->mmu->as.faulty_mask = 0;
+
+ for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
+ struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
+
+ if (vm)
+ panthor_vm_release_as_locked(vm);
+ }
+
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
+
+ /* Restart the VM_BIND queues. */
+ mutex_lock(&ptdev->mmu->vm.lock);
+ list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
+ panthor_vm_start(vm);
+ }
+ ptdev->mmu->vm.reset_in_progress = false;
+ mutex_unlock(&ptdev->mmu->vm.lock);
+}
+
+static void panthor_vm_free(struct drm_gpuvm *gpuvm)
+{
+ struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base);
+ struct panthor_device *ptdev = vm->ptdev;
+
+ mutex_lock(&vm->heaps.lock);
+ if (drm_WARN_ON(&ptdev->base, vm->heaps.pool))
+ panthor_heap_pool_destroy(vm->heaps.pool);
+ mutex_unlock(&vm->heaps.lock);
+ mutex_destroy(&vm->heaps.lock);
+
+ mutex_lock(&ptdev->mmu->vm.lock);
+ list_del(&vm->node);
+ /* Restore the scheduler state so we can call drm_sched_entity_destroy()
+ * and drm_sched_fini(). If we get there, that means we have no jobs left
+ * and no new jobs can be queued, so we can start the scheduler without
+ * risking interfering with the reset.
+ */
+ if (ptdev->mmu->vm.reset_in_progress)
+ panthor_vm_start(vm);
+ mutex_unlock(&ptdev->mmu->vm.lock);
+
+ drm_sched_entity_destroy(&vm->entity);
+ drm_sched_fini(&vm->sched);
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ if (vm->as.id >= 0) {
+ int cookie;
+
+ if (drm_dev_enter(&ptdev->base, &cookie)) {
+ panthor_mmu_as_disable(ptdev, vm->as.id);
+ drm_dev_exit(cookie);
+ }
+
+ ptdev->mmu->as.slots[vm->as.id].vm = NULL;
+ clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
+ list_del(&vm->as.lru_node);
+ }
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+
+ free_io_pgtable_ops(vm->pgtbl_ops);
+
+ drm_mm_takedown(&vm->mm);
+ kfree(vm);
+}
+
+/**
+ * panthor_vm_put() - Release a reference on a VM
+ * @vm: VM to release the reference on. Can be NULL.
+ */
+void panthor_vm_put(struct panthor_vm *vm)
+{
+ drm_gpuvm_put(vm ? &vm->base : NULL);
+}
+
+/**
+ * panthor_vm_get() - Get a VM reference
+ * @vm: VM to get the reference on. Can be NULL.
+ *
+ * Return: @vm value.
+ */
+struct panthor_vm *panthor_vm_get(struct panthor_vm *vm)
+{
+ if (vm)
+ drm_gpuvm_get(&vm->base);
+
+ return vm;
+}
+
+/**
+ * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
+ * @vm: VM to query the heap pool on.
+ * @create: True if the heap pool should be created when it doesn't exist.
+ *
+ * Heap pools are per-VM. This function allows one to retrieve the heap pool
+ * attached to a VM.
+ *
+ * If no heap pool exists yet, and @create is true, we create one.
+ *
+ * The returned panthor_heap_pool should be released with panthor_heap_pool_put().
+ *
+ * Return: A valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create)
+{
+ struct panthor_heap_pool *pool;
+
+ mutex_lock(&vm->heaps.lock);
+ if (!vm->heaps.pool && create) {
+ if (vm->destroyed)
+ pool = ERR_PTR(-EINVAL);
+ else
+ pool = panthor_heap_pool_create(vm->ptdev, vm);
+
+ if (!IS_ERR(pool))
+ vm->heaps.pool = panthor_heap_pool_get(pool);
+ } else {
+ pool = panthor_heap_pool_get(vm->heaps.pool);
+ if (!pool)
+ pool = ERR_PTR(-ENOENT);
+ }
+ mutex_unlock(&vm->heaps.lock);
+
+ return pool;
+}
+
+static u64 mair_to_memattr(u64 mair)
+{
+ u64 memattr = 0;
+ u32 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 in_attr = mair >> (8 * i), out_attr;
+ u8 outer = in_attr >> 4, inner = in_attr & 0xf;
+
+ /* For caching to be enabled, the inner and outer caching policies
+ * both have to be write-back. If one of them is write-through
+ * or non-cacheable, we just choose non-cacheable. Device
+ * memory is also translated to non-cacheable.
+ */
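+ /* E.g. (illustrative): a MAIR byte of 0xff (inner/outer write-back,
+ * RW-allocate) becomes 0x9f, while Normal non-cacheable (0x44)
+ * becomes 0x4c.
+ */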
+ if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
+ AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
+ } else {
+ /* Use SH_CPU_INNER mode so SH_IS, which is used when
+ * IOMMU_CACHE is set, actually maps to the standard
+ * definition of inner-shareable and not Mali's
+ * internal-shareable mode.
+ */
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
+ AS_MEMATTR_AARCH64_SH_CPU_INNER |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ }
+
+ memattr |= (u64)out_attr << (8 * i);
+ }
+
+ return memattr;
+}
+
+static void panthor_vma_link(struct panthor_vm *vm,
+ struct panthor_vma *vma,
+ struct drm_gpuvm_bo *vm_bo)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
+
+ mutex_lock(&bo->gpuva_list_lock);
+ drm_gpuva_link(&vma->base, vm_bo);
+ drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
+ mutex_unlock(&bo->gpuva_list_lock);
+}
+
+static void panthor_vma_unlink(struct panthor_vm *vm,
+ struct panthor_vma *vma)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
+ struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
+
+ mutex_lock(&bo->gpuva_list_lock);
+ drm_gpuva_unlink(&vma->base);
+ mutex_unlock(&bo->gpuva_list_lock);
+
+ /* drm_gpuva_unlink() releases the vm_bo, but we manually retained it
+ * when entering this function, so we can implement deferred VMA
+ * destruction. Re-assign it here.
+ */
+ vma->base.vm_bo = vm_bo;
+ list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
+}
+
+static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
+{
+ INIT_LIST_HEAD(&vma->node);
+ vma->flags = flags;
+}
+
+#define PANTHOR_VM_MAP_FLAGS \
+ (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)
+
+static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
+{
+ struct panthor_vm *vm = priv;
+ struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
+ struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
+ int ret;
+
+ if (!vma)
+ return -EINVAL;
+
+ panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
+
+ ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
+ op_ctx->map.sgt, op->map.gem.offset,
+ op->map.va.range);
+ if (ret)
+ return ret;
+
+ /* Ref owned by the mapping now; clear the obj field so we don't release the
+ * pinning/obj ref behind GPUVA's back.
+ */
+ drm_gpuva_map(&vm->base, &vma->base, &op->map);
+ panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
+ op_ctx->map.vm_bo = NULL;
+ return 0;
+}
+
+static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
+ void *priv)
+{
+ struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base);
+ struct panthor_vm *vm = priv;
+ struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
+ struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
+ u64 unmap_start, unmap_range;
+ int ret;
+
+ drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
+ ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
+ if (ret)
+ return ret;
+
+ if (op->remap.prev) {
+ prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
+ panthor_vma_init(prev_vma, unmap_vma->flags);
+ }
+
+ if (op->remap.next) {
+ next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
+ panthor_vma_init(next_vma, unmap_vma->flags);
+ }
+
+ drm_gpuva_remap(prev_vma ? &prev_vma->base : NULL,
+ next_vma ? &next_vma->base : NULL,
+ &op->remap);
+
+ if (prev_vma) {
+ /* panthor_vma_link() transfers the vm_bo ownership to
+ * the VMA object. Since the vm_bo we're passing is still
+ * owned by the old mapping which will be released when this
+ * mapping is destroyed, we need to grab a ref here.
+ */
+ panthor_vma_link(vm, prev_vma,
+ drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+ }
+
+ if (next_vma) {
+ panthor_vma_link(vm, next_vma,
+ drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+ }
+
+ panthor_vma_unlink(vm, unmap_vma);
+ return 0;
+}
+
+static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
+ void *priv)
+{
+ struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base);
+ struct panthor_vm *vm = priv;
+ int ret;
+
+ ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
+ unmap_vma->base.va.range);
+ if (drm_WARN_ON(&vm->ptdev->base, ret))
+ return ret;
+
+ drm_gpuva_unmap(&op->unmap);
+ panthor_vma_unlink(vm, unmap_vma);
+ return 0;
+}
+
+static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
+ .vm_free = panthor_vm_free,
+ .sm_step_map = panthor_gpuva_sm_step_map,
+ .sm_step_remap = panthor_gpuva_sm_step_remap,
+ .sm_step_unmap = panthor_gpuva_sm_step_unmap,
+};
+
+/**
+ * panthor_vm_resv() - Get the dma_resv object attached to a VM.
+ * @vm: VM to get the dma_resv of.
+ *
+ * Return: A dma_resv object.
+ */
+struct dma_resv *panthor_vm_resv(struct panthor_vm *vm)
+{
+ return drm_gpuvm_resv(&vm->base);
+}
+
+struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm)
+{
+ if (!vm)
+ return NULL;
+
+ return vm->base.r_obj;
+}
+
+static int
+panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
+ bool flag_vm_unusable_on_failure)
+{
+ u32 op_type = op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK;
+ int ret;
+
+ if (op_type == DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY)
+ return 0;
+
+ mutex_lock(&vm->op_lock);
+ vm->op_ctx = op;
+ switch (op_type) {
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ if (vm->unusable) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
+ op->map.vm_bo->obj, op->map.bo_offset);
+ break;
+
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
+ ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret && flag_vm_unusable_on_failure)
+ vm->unusable = true;
+
+ vm->op_ctx = NULL;
+ mutex_unlock(&vm->op_lock);
+
+ return ret;
+}
+
+static struct dma_fence *
+panthor_vm_bind_run_job(struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
+ bool cookie;
+ int ret;
+
+ /* Not only do we report an error whose result is propagated to the
+ * drm_sched finished fence, but we also flag the VM as unusable, because
+ * a failure in the async VM_BIND results in an inconsistent state. The
+ * VM needs to be destroyed and recreated.
+ */
+ cookie = dma_fence_begin_signalling();
+ ret = panthor_vm_exec_op(job->vm, &job->ctx, true);
+ dma_fence_end_signalling(cookie);
+
+ return ret ? ERR_PTR(ret) : NULL;
+}
+
+static void panthor_vm_bind_job_release(struct kref *kref)
+{
+ struct panthor_vm_bind_job *job = container_of(kref, struct panthor_vm_bind_job, refcount);
+
+ if (job->base.s_fence)
+ drm_sched_job_cleanup(&job->base);
+
+ panthor_vm_cleanup_op_ctx(&job->ctx, job->vm);
+ panthor_vm_put(job->vm);
+ kfree(job);
+}
+
+/**
+ * panthor_vm_bind_job_put() - Release a VM_BIND job reference
+ * @sched_job: Job to release the reference on.
+ */
+void panthor_vm_bind_job_put(struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job =
+ container_of(sched_job, struct panthor_vm_bind_job, base);
+
+ if (sched_job)
+ kref_put(&job->refcount, panthor_vm_bind_job_release);
+}
+
+static void
+panthor_vm_bind_free_job(struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job =
+ container_of(sched_job, struct panthor_vm_bind_job, base);
+
+ drm_sched_job_cleanup(sched_job);
+
+ /* Do the heavy cleanups asynchronously, so we're out of the
+ * dma-signaling path and can acquire dma-resv locks safely.
+ */
+ queue_work(panthor_cleanup_wq, &job->cleanup_op_ctx_work);
+}
+
+static enum drm_gpu_sched_stat
+panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
+{
+ WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
+ .run_job = panthor_vm_bind_run_job,
+ .free_job = panthor_vm_bind_free_job,
+ .timedout_job = panthor_vm_bind_timedout_job,
+};
+
+/**
+ * panthor_vm_create() - Create a VM
+ * @ptdev: Device.
+ * @for_mcu: True if this is the FW MCU VM.
+ * @kernel_va_start: Start of the range reserved for kernel BO mapping.
+ * @kernel_va_size: Size of the range reserved for kernel BO mapping.
+ * @auto_kernel_va_start: Start of the auto-VA kernel range.
+ * @auto_kernel_va_size: Size of the auto-VA kernel range.
+ *
+ * Return: A valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct panthor_vm *
+panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
+ u64 kernel_va_start, u64 kernel_va_size,
+ u64 auto_kernel_va_start, u64 auto_kernel_va_size)
+{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
+ u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
+ u64 full_va_range = 1ull << va_bits;
+ struct drm_gem_object *dummy_gem;
+ struct drm_gpu_scheduler *sched;
+ struct io_pgtable_cfg pgtbl_cfg;
+ u64 mair, min_va, va_range;
+ struct panthor_vm *vm;
+ int ret;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return ERR_PTR(-ENOMEM);
+
+ /* We allocate a dummy GEM for the VM. */
+ dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base);
+ if (!dummy_gem) {
+ ret = -ENOMEM;
+ goto err_free_vm;
+ }
+
+ mutex_init(&vm->heaps.lock);
+ vm->for_mcu = for_mcu;
+ vm->ptdev = ptdev;
+ mutex_init(&vm->op_lock);
+
+ if (for_mcu) {
+ /* CSF MCU is a Cortex-M7, and can only address 4G */
+ min_va = 0;
+ va_range = SZ_4G;
+ } else {
+ min_va = 0;
+ va_range = full_va_range;
+ }
+
+ mutex_init(&vm->mm_lock);
+ drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size);
+ vm->kernel_auto_va.start = auto_kernel_va_start;
+ vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1;
+
+ INIT_LIST_HEAD(&vm->node);
+ INIT_LIST_HEAD(&vm->as.lru_node);
+ vm->as.id = -1;
+ refcount_set(&vm->as.active_cnt, 0);
+
+ pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = SZ_4K | SZ_2M,
+ .ias = va_bits,
+ .oas = pa_bits,
+ .coherent_walk = ptdev->coherent,
+ .tlb = &mmu_tlb_ops,
+ .iommu_dev = ptdev->base.dev,
+ .alloc = alloc_pt,
+ .free = free_pt,
+ };
+
+ vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm);
+ if (!vm->pgtbl_ops) {
+ ret = -EINVAL;
+ goto err_mm_takedown;
+ }
+
+ /* Bind operations are synchronous for now, no timeout needed. */
+ ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq,
+ 1, 1, 0,
+ MAX_SCHEDULE_TIMEOUT, NULL, NULL,
+ "panthor-vm-bind", ptdev->base.dev);
+ if (ret)
+ goto err_free_io_pgtable;
+
+ sched = &vm->sched;
+ ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL);
+ if (ret)
+ goto err_sched_fini;
+
+ mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
+ vm->memattr = mair_to_memattr(mair);
+
+ mutex_lock(&ptdev->mmu->vm.lock);
+ list_add_tail(&vm->node, &ptdev->mmu->vm.list);
+
+ /* If a reset is in progress, stop the scheduler. */
+ if (ptdev->mmu->vm.reset_in_progress)
+ panthor_vm_stop(vm);
+ mutex_unlock(&ptdev->mmu->vm.lock);
+
+ /* We intentionally leave the reserved range at zero, because we want kernel VMAs
+ * to be handled the same way user VMAs are.
+ */
+ drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
+ DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
+ min_va, va_range, 0, 0, &panthor_gpuvm_ops);
+ drm_gem_object_put(dummy_gem);
+ return vm;
+
+err_sched_fini:
+ drm_sched_fini(&vm->sched);
+
+err_free_io_pgtable:
+ free_io_pgtable_ops(vm->pgtbl_ops);
+
+err_mm_takedown:
+ drm_mm_takedown(&vm->mm);
+ drm_gem_object_put(dummy_gem);
+
+err_free_vm:
+ kfree(vm);
+ return ERR_PTR(ret);
+}
+
+static int
+panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
+ struct panthor_vm *vm,
+ const struct drm_panthor_vm_bind_op *op,
+ struct panthor_vm_op_ctx *op_ctx)
+{
+ struct drm_gem_object *gem;
+ int ret;
+
+ /* Aligned on page size. */
+ if ((op->va | op->size) & ~PAGE_MASK)
+ return -EINVAL;
+
+ switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ gem = drm_gem_object_lookup(file, op->bo_handle);
+ ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm,
+ gem ? to_panthor_bo(gem) : NULL,
+ op->bo_offset,
+ op->size,
+ op->va,
+ op->flags);
+ drm_gem_object_put(gem);
+ return ret;
+
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
+ if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
+ return -EINVAL;
+
+ if (op->bo_handle || op->bo_offset)
+ return -EINVAL;
+
+ return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size);
+
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY:
+ if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
+ return -EINVAL;
+
+ if (op->bo_handle || op->bo_offset)
+ return -EINVAL;
+
+ if (op->va || op->size)
+ return -EINVAL;
+
+ if (!op->syncs.count)
+ return -EINVAL;
+
+ panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
+{
+ struct panthor_vm_bind_job *job =
+ container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work);
+
+ panthor_vm_bind_job_put(&job->base);
+}
+
+/**
+ * panthor_vm_bind_job_create() - Create a VM_BIND job
+ * @file: File.
+ * @vm: VM targeted by the VM_BIND job.
+ * @op: VM operation data.
+ *
+ * Return: A valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct drm_sched_job *
+panthor_vm_bind_job_create(struct drm_file *file,
+ struct panthor_vm *vm,
+ const struct drm_panthor_vm_bind_op *op)
+{
+ struct panthor_vm_bind_job *job;
+ int ret;
+
+ if (!vm)
+ return ERR_PTR(-EINVAL);
+
+ if (vm->destroyed || vm->unusable)
+ return ERR_PTR(-EINVAL);
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx);
+ if (ret) {
+ kfree(job);
+ return ERR_PTR(ret);
+ }
+
+ INIT_WORK(&job->cleanup_op_ctx_work, panthor_vm_bind_job_cleanup_op_ctx_work);
+ kref_init(&job->refcount);
+ job->vm = panthor_vm_get(vm);
+
+ ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
+ if (ret)
+ goto err_put_job;
+
+ return &job->base;
+
+err_put_job:
+ panthor_vm_bind_job_put(&job->base);
+ return ERR_PTR(ret);
+}
+
+/**
+ * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs
+ * @exec: The locking/preparation context.
+ * @sched_job: The job to prepare resvs on.
+ *
+ * Locks and prepare the VM resv.
+ *
+ * If this is a map operation, locks and prepares the GEM resv.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
+ struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
+ int ret;
+
+ /* Acquire the VM lock and reserve a slot for this VM bind job. */
+ ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1);
+ if (ret)
+ return ret;
+
+ if (job->ctx.map.vm_bo) {
+ /* Lock/prepare the GEM being mapped. */
+ ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job
+ * @exec: drm_exec context.
+ * @sched_job: Job to update the resvs on.
+ */
+void panthor_vm_bind_job_update_resvs(struct drm_exec *exec,
+ struct drm_sched_job *sched_job)
+{
+ struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
+
+ /* Explicit sync => we just register our job finished fence as bookkeep. */
+ drm_gpuvm_resv_add_fence(&job->vm->base, exec,
+ &sched_job->s_fence->finished,
+ DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_BOOKKEEP);
+}
+
+void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage)
+{
+ drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage);
+}
+
+/**
+ * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously.
+ * @file: File.
+ * @vm: VM targeted by the VM operation.
+ * @op: Data describing the VM operation.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_bind_exec_sync_op(struct drm_file *file,
+ struct panthor_vm *vm,
+ struct drm_panthor_vm_bind_op *op)
+{
+ struct panthor_vm_op_ctx op_ctx;
+ int ret;
+
+ /* No sync objects allowed on synchronous operations. */
+ if (op->syncs.count)
+ return -EINVAL;
+
+ if (!op->size)
+ return 0;
+
+ ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx);
+ if (ret)
+ return ret;
+
+ ret = panthor_vm_exec_op(vm, &op_ctx, false);
+ panthor_vm_cleanup_op_ctx(&op_ctx, vm);
+
+ return ret;
+}
+
+/**
+ * panthor_vm_map_bo_range() - Map a GEM object range to a VM
+ * @vm: VM to map the GEM to.
+ * @bo: GEM object to map.
+ * @offset: Offset in the GEM object.
+ * @size: Size to map.
+ * @va: Virtual address to map the object to.
+ * @flags: Combination of drm_panthor_vm_bind_op_flags flags.
+ * Only map-related flags are valid.
+ *
+ * Internal use only. For userspace requests, use
+ * panthor_vm_bind_exec_sync_op() instead.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
+ u64 offset, u64 size, u64 va, u32 flags)
+{
+ struct panthor_vm_op_ctx op_ctx;
+ int ret;
+
+ ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags);
+ if (ret)
+ return ret;
+
+ ret = panthor_vm_exec_op(vm, &op_ctx, false);
+ panthor_vm_cleanup_op_ctx(&op_ctx, vm);
+
+ return ret;
+}
+
+/**
+ * panthor_vm_unmap_range() - Unmap a portion of the VA space
+ * @vm: VM to unmap the region from.
+ * @va: Virtual address to unmap. Must be 4k aligned.
+ * @size: Size of the region to unmap. Must be 4k aligned.
+ *
+ * Internal use only. For userspace requests, use
+ * panthor_vm_bind_exec_sync_op() instead.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size)
+{
+ struct panthor_vm_op_ctx op_ctx;
+ int ret;
+
+ ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size);
+ if (ret)
+ return ret;
+
+ ret = panthor_vm_exec_op(vm, &op_ctx, false);
+ panthor_vm_cleanup_op_ctx(&op_ctx, vm);
+
+ return ret;
+}
+
+/**
+ * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
+ * @exec: Locking/preparation context.
+ * @vm: VM targeted by the GPU job.
+ * @slot_count: Number of slots to reserve.
+ *
+ * GPU jobs assume all BOs bound to the VM at the time the job is submitted
+ * are available when the job is executed. In order to guarantee that, we
+ * need to reserve a slot on all BOs mapped to a VM and update this slot with
+ * the job fence after its submission.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm,
+ u32 slot_count)
+{
+ int ret;
+
+ /* Acquire the VM lock and reserve a slot for this GPU job. */
+ ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count);
+ if (ret)
+ return ret;
+
+ return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count);
+}
+
+/**
+ * panthor_mmu_unplug() - Unplug the MMU logic
+ * @ptdev: Device.
+ *
+ * No access to the MMU regs should be done after this function is called.
+ * We suspend the IRQ and disable all VMs to guarantee that.
+ */
+void panthor_mmu_unplug(struct panthor_device *ptdev)
+{
+ panthor_mmu_irq_suspend(&ptdev->mmu->irq);
+
+ mutex_lock(&ptdev->mmu->as.slots_lock);
+ for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
+ struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
+
+ if (vm) {
+ drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
+ panthor_vm_release_as_locked(vm);
+ }
+ }
+ mutex_unlock(&ptdev->mmu->as.slots_lock);
+}
+
+static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
+{
+ destroy_workqueue(res);
+}
+
+/**
+ * panthor_mmu_init() - Initialize the MMU logic.
+ * @ptdev: Device.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_mmu_init(struct panthor_device *ptdev)
+{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
+ struct panthor_mmu *mmu;
+ int ret, irq;
+
+ mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mmu->as.lru_list);
+
+ ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&mmu->vm.list);
+ ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock);
+ if (ret)
+ return ret;
+
+ ptdev->mmu = mmu;
+
+ irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu");
+ if (irq <= 0)
+ return -ENODEV;
+
+ ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq,
+ panthor_mmu_fault_mask(ptdev, ~0));
+ if (ret)
+ return ret;
+
+ mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0);
+ if (!mmu->vm.wq)
+ return -ENOMEM;
+
+ /* On 32-bit kernels, the VA space is limited by the io_pgtable_ops abstraction,
+ * which passes iova as an unsigned long. Patch the mmu_features to reflect this
+ * limitation.
+ */
+ if (sizeof(unsigned long) * 8 < va_bits) {
+ ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
+ ptdev->gpu_info.mmu_features |= sizeof(unsigned long) * 8;
+ }
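+ /* e.g. (illustrative) a GPU reporting va_bits == 48 on a 32-bit kernel
+ * ends up with mmu_features advertising 32 VA bits.
+ */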
+
+ return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m)
+{
+ int ret;
+
+ mutex_lock(&vm->op_lock);
+ ret = drm_debugfs_gpuva_info(m, &vm->base);
+ mutex_unlock(&vm->op_lock);
+
+ return ret;
+}
+
+static int show_each_vm(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *ddev = node->minor->dev;
+ struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
+ int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data;
+ struct panthor_vm *vm;
+ int ret = 0;
+
+ mutex_lock(&ptdev->mmu->vm.lock);
+ list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
+ ret = show(vm, m);
+ if (ret < 0)
+ break;
+
+ seq_puts(m, "\n");
+ }
+ mutex_unlock(&ptdev->mmu->vm.lock);
+
+ return ret;
+}
+
+static struct drm_info_list panthor_mmu_debugfs_list[] = {
+ DRM_DEBUGFS_GPUVA_INFO(show_each_vm, show_vm_gpuvas),
+};
+
+/**
+ * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries
+ * @minor: Minor.
+ */
+void panthor_mmu_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panthor_mmu_debugfs_list,
+ ARRAY_SIZE(panthor_mmu_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * panthor_mmu_pt_cache_init() - Initialize the page table cache.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int panthor_mmu_pt_cache_init(void)
+{
+ pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL);
+ if (!pt_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * panthor_mmu_pt_cache_fini() - Destroy the page table cache.
+ */
+void panthor_mmu_pt_cache_fini(void)
+{
+ kmem_cache_destroy(pt_cache);
+}
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
new file mode 100644
index 000000000000..f3c1ed19f973
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_MMU_H__
+#define __PANTHOR_MMU_H__
+
+#include <linux/dma-resv.h>
+
+struct drm_exec;
+struct drm_sched_job;
+struct panthor_gem_object;
+struct panthor_heap_pool;
+struct panthor_vm;
+struct panthor_vma;
+struct panthor_mmu;
+
+int panthor_mmu_init(struct panthor_device *ptdev);
+void panthor_mmu_unplug(struct panthor_device *ptdev);
+void panthor_mmu_pre_reset(struct panthor_device *ptdev);
+void panthor_mmu_post_reset(struct panthor_device *ptdev);
+void panthor_mmu_suspend(struct panthor_device *ptdev);
+void panthor_mmu_resume(struct panthor_device *ptdev);
+
+int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
+ u64 offset, u64 size, u64 va, u32 flags);
+int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size);
+struct panthor_gem_object *
+panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
+
+int panthor_vm_active(struct panthor_vm *vm);
+void panthor_vm_idle(struct panthor_vm *vm);
+int panthor_vm_as(struct panthor_vm *vm);
+
+struct panthor_heap_pool *
+panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
+
+struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
+void panthor_vm_put(struct panthor_vm *vm);
+struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
+ u64 kernel_va_start, u64 kernel_va_size,
+ u64 kernel_auto_va_start,
+ u64 kernel_auto_va_size);
+
+int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec,
+ struct panthor_vm *vm,
+ u32 slot_count);
+int panthor_vm_add_bos_resvs_deps_to_job(struct panthor_vm *vm,
+ struct drm_sched_job *job);
+void panthor_vm_add_job_fence_to_bos_resvs(struct panthor_vm *vm,
+ struct drm_sched_job *job);
+
+struct dma_resv *panthor_vm_resv(struct panthor_vm *vm);
+struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm);
+
+void panthor_vm_pool_destroy(struct panthor_file *pfile);
+int panthor_vm_pool_create(struct panthor_file *pfile);
+int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
+ struct panthor_vm_pool *pool,
+ struct drm_panthor_vm_create *args);
+int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle);
+struct panthor_vm *panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle);
+
+bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm);
+bool panthor_vm_is_unusable(struct panthor_vm *vm);
+
+/*
+ * PANTHOR_VM_KERNEL_AUTO_VA: Use this magic address when you want the GEM
+ * logic to auto-allocate the virtual address in the reserved kernel VA range.
+ */
+#define PANTHOR_VM_KERNEL_AUTO_VA ~0ull
+
+int panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
+ struct drm_mm_node *va_node);
+void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node);
+
+int panthor_vm_bind_exec_sync_op(struct drm_file *file,
+ struct panthor_vm *vm,
+ struct drm_panthor_vm_bind_op *op);
+
+struct drm_sched_job *
+panthor_vm_bind_job_create(struct drm_file *file,
+ struct panthor_vm *vm,
+ const struct drm_panthor_vm_bind_op *op);
+void panthor_vm_bind_job_put(struct drm_sched_job *job);
+int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
+ struct drm_sched_job *job);
+void panthor_vm_bind_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
+
+void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage);
+
+int panthor_mmu_pt_cache_init(void);
+void panthor_mmu_pt_cache_fini(void);
+
+#ifdef CONFIG_DEBUG_FS
+void panthor_mmu_debugfs_init(struct drm_minor *minor);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
new file mode 100644
index 000000000000..b7b3b3add166
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2023 Collabora ltd. */
+/*
+ * Register definitions based on mali_kbase_gpu_regmap.h and
+ * mali_kbase_gpu_regmap_csf.h
+ * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
+ */
+#ifndef __PANTHOR_REGS_H__
+#define __PANTHOR_REGS_H__
+
+#define GPU_ID 0x0
+#define GPU_ARCH_MAJOR(x) ((x) >> 28)
+#define GPU_ARCH_MINOR(x) (((x) & GENMASK(27, 24)) >> 24)
+#define GPU_ARCH_REV(x) (((x) & GENMASK(23, 20)) >> 20)
+#define GPU_PROD_MAJOR(x) (((x) & GENMASK(19, 16)) >> 16)
+#define GPU_VER_MAJOR(x) (((x) & GENMASK(15, 12)) >> 12)
+#define GPU_VER_MINOR(x) (((x) & GENMASK(11, 4)) >> 4)
+#define GPU_VER_STATUS(x) ((x) & GENMASK(3, 0))
+
+#define GPU_L2_FEATURES 0x4
+#define GPU_L2_FEATURES_LINE_SIZE(x) (1 << ((x) & GENMASK(7, 0)))
+
+#define GPU_CORE_FEATURES 0x8
+
+#define GPU_TILER_FEATURES 0xC
+#define GPU_MEM_FEATURES 0x10
+#define GROUPS_L2_COHERENT BIT(0)
+
+#define GPU_MMU_FEATURES 0x14
+#define GPU_MMU_FEATURES_VA_BITS(x) ((x) & GENMASK(7, 0))
+#define GPU_MMU_FEATURES_PA_BITS(x) (((x) >> 8) & GENMASK(7, 0))
+#define GPU_AS_PRESENT 0x18
+#define GPU_CSF_ID 0x1C
+
+#define GPU_INT_RAWSTAT 0x20
+#define GPU_INT_CLEAR 0x24
+#define GPU_INT_MASK 0x28
+#define GPU_INT_STAT 0x2c
+#define GPU_IRQ_FAULT BIT(0)
+#define GPU_IRQ_PROTM_FAULT BIT(1)
+#define GPU_IRQ_RESET_COMPLETED BIT(8)
+#define GPU_IRQ_POWER_CHANGED BIT(9)
+#define GPU_IRQ_POWER_CHANGED_ALL BIT(10)
+#define GPU_IRQ_CLEAN_CACHES_COMPLETED BIT(17)
+#define GPU_IRQ_DOORBELL_MIRROR BIT(18)
+#define GPU_IRQ_MCU_STATUS_CHANGED BIT(19)
+#define GPU_CMD 0x30
+#define GPU_CMD_DEF(type, payload) ((type) | ((payload) << 8))
+#define GPU_SOFT_RESET GPU_CMD_DEF(1, 1)
+#define GPU_HARD_RESET GPU_CMD_DEF(1, 2)
+#define CACHE_CLEAN BIT(0)
+#define CACHE_INV BIT(1)
+#define GPU_FLUSH_CACHES(l2, lsc, oth) \
+ GPU_CMD_DEF(4, ((l2) << 0) | ((lsc) << 4) | ((oth) << 8))
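+/* e.g. GPU_FLUSH_CACHES(CACHE_CLEAN | CACHE_INV, CACHE_CLEAN | CACHE_INV, 0)
+ * encodes to GPU_CMD_DEF(4, 0x33), i.e. 0x3304.
+ */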
+
+#define GPU_STATUS 0x34
+#define GPU_STATUS_ACTIVE BIT(0)
+#define GPU_STATUS_PWR_ACTIVE BIT(1)
+#define GPU_STATUS_PAGE_FAULT BIT(4)
+#define GPU_STATUS_PROTM_ACTIVE BIT(7)
+#define GPU_STATUS_DBG_ENABLED BIT(8)
+
+#define GPU_FAULT_STATUS 0x3C
+#define GPU_FAULT_ADDR_LO 0x40
+#define GPU_FAULT_ADDR_HI 0x44
+
+#define GPU_PWR_KEY 0x50
+#define GPU_PWR_KEY_UNLOCK 0x2968A819
+#define GPU_PWR_OVERRIDE0 0x54
+#define GPU_PWR_OVERRIDE1 0x58
+
+#define GPU_TIMESTAMP_OFFSET_LO 0x88
+#define GPU_TIMESTAMP_OFFSET_HI 0x8C
+#define GPU_CYCLE_COUNT_LO 0x90
+#define GPU_CYCLE_COUNT_HI 0x94
+#define GPU_TIMESTAMP_LO 0x98
+#define GPU_TIMESTAMP_HI 0x9C
+
+#define GPU_THREAD_MAX_THREADS 0xA0
+#define GPU_THREAD_MAX_WORKGROUP_SIZE 0xA4
+#define GPU_THREAD_MAX_BARRIER_SIZE 0xA8
+#define GPU_THREAD_FEATURES 0xAC
+
+#define GPU_TEXTURE_FEATURES(n) (0xB0 + ((n) * 4))
+
+#define GPU_SHADER_PRESENT_LO 0x100
+#define GPU_SHADER_PRESENT_HI 0x104
+#define GPU_TILER_PRESENT_LO 0x110
+#define GPU_TILER_PRESENT_HI 0x114
+#define GPU_L2_PRESENT_LO 0x120
+#define GPU_L2_PRESENT_HI 0x124
+
+#define SHADER_READY_LO 0x140
+#define SHADER_READY_HI 0x144
+#define TILER_READY_LO 0x150
+#define TILER_READY_HI 0x154
+#define L2_READY_LO 0x160
+#define L2_READY_HI 0x164
+
+#define SHADER_PWRON_LO 0x180
+#define SHADER_PWRON_HI 0x184
+#define TILER_PWRON_LO 0x190
+#define TILER_PWRON_HI 0x194
+#define L2_PWRON_LO 0x1A0
+#define L2_PWRON_HI 0x1A4
+
+#define SHADER_PWROFF_LO 0x1C0
+#define SHADER_PWROFF_HI 0x1C4
+#define TILER_PWROFF_LO 0x1D0
+#define TILER_PWROFF_HI 0x1D4
+#define L2_PWROFF_LO 0x1E0
+#define L2_PWROFF_HI 0x1E4
+
+#define SHADER_PWRTRANS_LO 0x200
+#define SHADER_PWRTRANS_HI 0x204
+#define TILER_PWRTRANS_LO 0x210
+#define TILER_PWRTRANS_HI 0x214
+#define L2_PWRTRANS_LO 0x220
+#define L2_PWRTRANS_HI 0x224
+
+#define SHADER_PWRACTIVE_LO 0x240
+#define SHADER_PWRACTIVE_HI 0x244
+#define TILER_PWRACTIVE_LO 0x250
+#define TILER_PWRACTIVE_HI 0x254
+#define L2_PWRACTIVE_LO 0x260
+#define L2_PWRACTIVE_HI 0x264
+
+#define GPU_REVID 0x280
+
+#define GPU_COHERENCY_FEATURES 0x300
+#define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name)
+
+#define GPU_COHERENCY_PROTOCOL 0x304
+#define GPU_COHERENCY_ACE 0
+#define GPU_COHERENCY_ACE_LITE 1
+#define GPU_COHERENCY_NONE 31
+
+#define MCU_CONTROL 0x700
+#define MCU_CONTROL_ENABLE 1
+#define MCU_CONTROL_AUTO 2
+#define MCU_CONTROL_DISABLE 0
+
+#define MCU_STATUS 0x704
+#define MCU_STATUS_DISABLED 0
+#define MCU_STATUS_ENABLED 1
+#define MCU_STATUS_HALT 2
+#define MCU_STATUS_FATAL 3
+
+/* Job Control regs */
+#define JOB_INT_RAWSTAT 0x1000
+#define JOB_INT_CLEAR 0x1004
+#define JOB_INT_MASK 0x1008
+#define JOB_INT_STAT 0x100c
+#define JOB_INT_GLOBAL_IF BIT(31)
+#define JOB_INT_CSG_IF(x) BIT(x)
+
+/* MMU regs */
+#define MMU_INT_RAWSTAT 0x2000
+#define MMU_INT_CLEAR 0x2004
+#define MMU_INT_MASK 0x2008
+#define MMU_INT_STAT 0x200c
+
+/* AS_COMMAND register commands */
+
+#define MMU_BASE 0x2400
+#define MMU_AS_SHIFT 6
+#define MMU_AS(as) (MMU_BASE + ((as) << MMU_AS_SHIFT))
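+/* e.g. MMU_AS(2) == 0x2400 + (2 << 6) == 0x2480 */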
+
+#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x0)
+#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x4)
+#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x8)
+#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0xC)
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
+ ((w) ? BIT(0) : 0) | \
+ ((r) ? BIT(1) : 0))
+#define AS_MEMATTR_AARCH64_SH_MIDGARD_INNER (0 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER (1 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER_SHADER_COH (2 << 4)
+#define AS_MEMATTR_AARCH64_SHARED (0 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
+#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
+#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10)
+#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14)
+#define AS_COMMAND(as) (MMU_AS(as) + 0x18)
+#define AS_COMMAND_NOP 0
+#define AS_COMMAND_UPDATE 1
+#define AS_COMMAND_LOCK 2
+#define AS_COMMAND_UNLOCK 3
+#define AS_COMMAND_FLUSH_PT 4
+#define AS_COMMAND_FLUSH_MEM 5
+#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
+#define AS_FAULTSTATUS(as) (MMU_AS(as) + 0x1C)
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+#define AS_FAULTADDRESS_LO(as) (MMU_AS(as) + 0x20)
+#define AS_FAULTADDRESS_HI(as) (MMU_AS(as) + 0x24)
+#define AS_STATUS(as) (MMU_AS(as) + 0x28)
+#define AS_STATUS_AS_ACTIVE BIT(0)
+#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30)
+#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34)
+#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
+#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K (8 << 0)
+#define AS_TRANSCFG_INA_BITS(x) ((x) << 6)
+#define AS_TRANSCFG_OUTA_BITS(x) ((x) << 14)
+#define AS_TRANSCFG_SL_CONCAT BIT(22)
+#define AS_TRANSCFG_PTW_MEMATTR_NC (1 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WB (2 << 24)
+#define AS_TRANSCFG_PTW_SH_NS (0 << 28)
+#define AS_TRANSCFG_PTW_SH_OS (2 << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3 << 28)
+#define AS_TRANSCFG_PTW_RA BIT(30)
+#define AS_TRANSCFG_DISABLE_HIER_AP BIT(33)
+#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
+#define AS_TRANSCFG_WXN BIT(35)
+#define AS_TRANSCFG_XREADABLE BIT(36)
+#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38)
+#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C)
+
+#define CSF_GPU_LATEST_FLUSH_ID 0x10000
+
+#define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000))
+#define CSF_GLB_DOORBELL_ID 0
+
+#define gpu_write(dev, reg, data) \
+ writel(data, (dev)->iomem + (reg))
+
+#define gpu_read(dev, reg) \
+ readl((dev)->iomem + (reg))
+
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
new file mode 100644
index 000000000000..b3a51a6de523
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -0,0 +1,3499 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2023 Collabora ltd. */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panthor_drm.h>
+
+#include <linux/build_bug.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-resv.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/iosys-map.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "panthor_devfreq.h"
+#include "panthor_device.h"
+#include "panthor_fw.h"
+#include "panthor_gem.h"
+#include "panthor_gpu.h"
+#include "panthor_heap.h"
+#include "panthor_mmu.h"
+#include "panthor_regs.h"
+#include "panthor_sched.h"
+
+/**
+ * DOC: Scheduler
+ *
+ * Mali CSF hardware adopts a firmware-assisted scheduling model, where
+ * the firmware takes care of scheduling aspects, to some extent.
+ *
+ * The scheduling happens at the scheduling group level, each group
+ * contains 1 to N queues (N is FW/hardware dependent, and exposed
+ * through the firmware interface). Each queue is assigned a command
+ * stream ring buffer, which serves as a way to get jobs submitted to
+ * the GPU, among other things.
+ *
+ * The firmware can schedule a maximum of M groups (M is FW/hardware
+ * dependent, and exposed through the firmware interface). Beyond
+ * this maximum number of groups, the kernel must take care of
+ * rotating the groups passed to the firmware so every group gets
+ * a chance to have its queues scheduled for execution.
+ *
+ * The current implementation only supports kernel-mode queues.
+ * In other terms, userspace doesn't have access to the ring-buffer.
+ * Instead, userspace passes indirect command stream buffers that are
+ * called from the queue ring-buffer by the kernel using a pre-defined
+ * sequence of command stream instructions to ensure the userspace driver
+ * always gets consistent results (cache maintenance,
+ * synchronization, ...).
+ *
+ * We rely on the drm_gpu_scheduler framework to deal with job
+ * dependencies and submission. As any other driver dealing with a
+ * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
+ * entity has its own job scheduler. When a job is ready to be executed
+ * (all its dependencies are met), it is pushed to the appropriate
+ * queue ring-buffer, and the group is scheduled for execution if it
+ * wasn't already active.
+ *
+ * Kernel-side group scheduling is timeslice-based. When we have fewer
+ * groups than there are slots, the periodic tick is disabled and we
+ * just let the FW schedule the active groups. When there are more
+ * groups than slots, we give each group a chance to execute work for
+ * a given amount of time, and then re-evaluate and pick new groups
+ * to schedule. The group selection algorithm is based on
+ * priority+round-robin.
+ *
+ * Even though user-mode queues are out of scope right now, the
+ * current design takes them into account by avoiding any guess on the
+ * group/queue state that would be based on information we wouldn't have
+ * if userspace was in charge of the ring-buffer. That's also one of the
+ * reasons we don't do 'cooperative' scheduling (encoding FW group slot
+ * reservation as dma_fence that would be returned from the
+ * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
+ * a queue of waiters, ordered by job submission order). This approach
+ * would work for kernel-mode queues, but would make user-mode queues a
+ * lot more complicated to retrofit.
+ */
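+
+/*
+ * Minimal sketch of the priority+round-robin selection described above
+ * (illustrative pseudo-code, not the actual tick implementation;
+ * give_slot_to() is a placeholder):
+ *
+ * for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ * list_for_each_entry(group, &sched->groups.runnable[prio], run_node) {
+ * if (!free_slots)
+ * break;
+ * give_slot_to(group);
+ * free_slots--;
+ * }
+ * }
+ *
+ * with the runnable lists rotated on each tick so every group of a given
+ * priority eventually gets a slot.
+ */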
+
+#define JOB_TIMEOUT_MS 5000
+
+#define MIN_CS_PER_CSG 8
+
+#define MIN_CSGS 3
+#define MAX_CSG_PRIO 0xf
+
+struct panthor_group;
+
+/**
+ * struct panthor_csg_slot - Command stream group slot
+ *
+ * This represents a FW slot for a scheduling group.
+ */
+struct panthor_csg_slot {
+ /** @group: Scheduling group bound to this slot. */
+ struct panthor_group *group;
+
+ /** @priority: Group priority. */
+ u8 priority;
+
+ /**
+ * @idle: True if the group bound to this slot is idle.
+ *
+ * A group is idle when it has nothing waiting for execution on
+ * all its queues, or when queues are blocked waiting for something
+ * to happen (synchronization object).
+ */
+ bool idle;
+};
+
+/**
+ * enum panthor_csg_priority - Group priority
+ */
+enum panthor_csg_priority {
+ /** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
+ PANTHOR_CSG_PRIORITY_LOW = 0,
+
+ /** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
+ PANTHOR_CSG_PRIORITY_MEDIUM,
+
+ /** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
+ PANTHOR_CSG_PRIORITY_HIGH,
+
+ /**
+ * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
+ *
+ * Real-time priority allows one to preempt scheduling of other
+ * non-real-time groups. When such a group becomes executable,
+ * it will evict the group with the lowest non-rt priority if
+ * there's no free group slot available.
+ *
+ * Currently not exposed to userspace.
+ */
+ PANTHOR_CSG_PRIORITY_RT,
+
+ /** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
+ PANTHOR_CSG_PRIORITY_COUNT,
+};
+
+/**
+ * struct panthor_scheduler - Object used to manage the scheduler
+ */
+struct panthor_scheduler {
+ /** @ptdev: Device. */
+ struct panthor_device *ptdev;
+
+ /**
+ * @wq: Workqueue used by our internal scheduler logic and
+ * drm_gpu_scheduler.
+ *
+ * Used for the scheduler tick, group update or other kind of FW
+ * event processing that can't be handled in the threaded interrupt
+ * path. Also passed to the drm_gpu_scheduler instances embedded
+ * in panthor_queue.
+ */
+ struct workqueue_struct *wq;
+
+ /**
+ * @heap_alloc_wq: Workqueue used to schedule tiler_oom works.
+ *
+ * We have a queue dedicated to heap chunk allocation works to avoid
+ * blocking the rest of the scheduler if the allocation tries to
+ * reclaim memory.
+ */
+ struct workqueue_struct *heap_alloc_wq;
+
+ /** @tick_work: Work executed on a scheduling tick. */
+ struct delayed_work tick_work;
+
+ /**
+ * @sync_upd_work: Work used to process synchronization object updates.
+ *
+ * We use this work to unblock queues/groups that were waiting on a
+ * synchronization object.
+ */
+ struct work_struct sync_upd_work;
+
+ /**
+ * @fw_events_work: Work used to process FW events outside the interrupt path.
+ *
+ * Even if the interrupt is threaded, any event processing that
+ * requires taking the panthor_scheduler::lock must happen outside
+ * the interrupt path, so we don't block the tick logic when it
+ * calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
+ * event processing requires taking this lock, we just delegate all
+ * FW event processing to the scheduler workqueue.
+ */
+ struct work_struct fw_events_work;
+
+ /**
+ * @fw_events: Bitmask encoding pending FW events.
+ */
+ atomic_t fw_events;
+
+ /**
+ * @resched_target: When the next tick should occur.
+ *
+ * Expressed in jiffies.
+ */
+ u64 resched_target;
+
+ /**
+ * @last_tick: When the last tick occurred.
+ *
+ * Expressed in jiffies.
+ */
+ u64 last_tick;
+
+ /** @tick_period: Tick period in jiffies. */
+ u64 tick_period;
+
+ /**
+ * @lock: Lock protecting access to all the scheduler fields.
+ *
+ * Should be taken in the tick work, the irq handler, and anywhere the @groups
+ * fields are touched.
+ */
+ struct mutex lock;
+
+ /** @groups: Various lists used to classify groups. */
+ struct {
+ /**
+ * @runnable: Runnable group lists.
+ *
+ * When a group has queues that want to execute something,
+ * its panthor_group::run_node should be inserted here.
+ *
+ * One list per-priority.
+ */
+ struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];
+
+ /**
+ * @idle: Idle group lists.
+ *
+ * When all queues of a group are idle (either because they
+ * have nothing to execute, or because they are blocked), the
+ * panthor_group::run_node field should be inserted here.
+ *
+ * One list per-priority.
+ */
+ struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];
+
+ /**
+ * @waiting: List of groups whose queues are blocked on a
+ * synchronization object.
+ *
+ * Insert panthor_group::wait_node here when a group is waiting
+ * for synchronization objects to be signaled.
+ *
+ * This list is evaluated in the @sync_upd_work work.
+ */
+ struct list_head waiting;
+ } groups;
+
+ /**
+ * @csg_slots: FW command stream group slots.
+ */
+ struct panthor_csg_slot csg_slots[MAX_CSGS];
+
+ /** @csg_slot_count: Number of command stream group slots exposed by the FW. */
+ u32 csg_slot_count;
+
+ /** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
+ u32 cs_slot_count;
+
+ /** @as_slot_count: Number of address space slots supported by the MMU. */
+ u32 as_slot_count;
+
+ /** @used_csg_slot_count: Number of command stream group slots currently used. */
+ u32 used_csg_slot_count;
+
+ /** @sb_slot_count: Number of scoreboard slots. */
+ u32 sb_slot_count;
+
+ /**
+ * @might_have_idle_groups: True if an active group might have become idle.
+ *
+ * This will force a tick, so other runnable groups can be scheduled if one
+ * or more active groups became idle.
+ */
+ bool might_have_idle_groups;
+
+ /** @pm: Power management related fields. */
+ struct {
+ /** @has_ref: True if the scheduler owns a runtime PM reference. */
+ bool has_ref;
+ } pm;
+
+ /** @reset: Reset related fields. */
+ struct {
+ /** @lock: Lock protecting the other reset fields. */
+ struct mutex lock;
+
+ /**
+ * @in_progress: True if a reset is in progress.
+ *
+ * Set to true in panthor_sched_pre_reset() and back to false in
+ * panthor_sched_post_reset().
+ */
+ atomic_t in_progress;
+
+ /**
+ * @stopped_groups: List containing all groups that were stopped
+ * before a reset.
+ *
+ * Insert panthor_group::run_node in the pre_reset path.
+ */
+ struct list_head stopped_groups;
+ } reset;
+};
+
+/**
+ * struct panthor_syncobj_32b - 32-bit FW synchronization object
+ */
+struct panthor_syncobj_32b {
+ /** @seqno: Sequence number. */
+ u32 seqno;
+
+ /**
+ * @status: Status.
+ *
+ * Non-zero on failure.
+ */
+ u32 status;
+};
+
+/**
+ * struct panthor_syncobj_64b - 64-bit FW synchronization object
+ */
+struct panthor_syncobj_64b {
+ /** @seqno: Sequence number. */
+ u64 seqno;
+
+ /**
+ * @status: Status.
+ *
+ * Non-zero on failure.
+ */
+ u32 status;
+
+ /** @pad: MBZ. */
+ u32 pad;
+};
+
+/**
+ * struct panthor_queue - Execution queue
+ */
+struct panthor_queue {
+ /** @scheduler: DRM scheduler used for this queue. */
+ struct drm_gpu_scheduler scheduler;
+
+ /** @entity: DRM scheduling entity used for this queue. */
+ struct drm_sched_entity entity;
+
+ /**
+ * @remaining_time: Time remaining before the job timeout expires.
+ *
+ * The job timeout is suspended when the queue is not scheduled by the
+ * FW. Every time we suspend the timer, we need to save the remaining
+ * time so we can restore it later on.
+ */
+ unsigned long remaining_time;
+
+ /** @timeout_suspended: True if the job timeout was suspended. */
+ bool timeout_suspended;
+
+ /**
+ * @doorbell_id: Doorbell assigned to this queue.
+ *
+ * Right now, all groups share the same doorbell, and the doorbell ID
+ * is set to group_slot + 1 when the group is assigned a slot. But
+ * we might decide to provide fine-grained doorbell assignment at some
+ * point, so we don't have to wake up all queues in a group every time
+ * one of them is updated.
+ */
+ u8 doorbell_id;
+
+ /**
+ * @priority: Priority of the queue inside the group.
+ *
+ * Must be less than 16 (only 4 bits available).
+ */
+ u8 priority;
+#define CSF_MAX_QUEUE_PRIO GENMASK(3, 0)
+
+ /** @ringbuf: Command stream ring-buffer. */
+ struct panthor_kernel_bo *ringbuf;
+
+ /** @iface: Firmware interface. */
+ struct {
+ /** @mem: FW memory allocated for this interface. */
+ struct panthor_kernel_bo *mem;
+
+ /** @input: Input interface. */
+ struct panthor_fw_ringbuf_input_iface *input;
+
+ /** @output: Output interface. */
+ const struct panthor_fw_ringbuf_output_iface *output;
+
+ /** @input_fw_va: FW virtual address of the input interface buffer. */
+ u32 input_fw_va;
+
+ /** @output_fw_va: FW virtual address of the output interface buffer. */
+ u32 output_fw_va;
+ } iface;
+
+ /**
+ * @syncwait: Stores information about the synchronization object this
+ * queue is waiting on.
+ */
+ struct {
+ /** @gpu_va: GPU address of the synchronization object. */
+ u64 gpu_va;
+
+ /** @ref: Reference value to compare against. */
+ u64 ref;
+
+ /** @gt: True if this is a greater-than test. */
+ bool gt;
+
+ /** @sync64: True if this is a 64-bit sync object. */
+ bool sync64;
+
+ /** @obj: Buffer object holding the synchronization object. */
+ struct drm_gem_object *obj;
+
+ /** @offset: Offset of the synchronization object inside @obj. */
+ u64 offset;
+
+ /**
+ * @kmap: Kernel mapping of the buffer object holding the
+ * synchronization object.
+ */
+ void *kmap;
+ } syncwait;
+
+ /** @fence_ctx: Fence context fields. */
+ struct {
+ /** @lock: Used to protect access to all fences allocated by this context. */
+ spinlock_t lock;
+
+ /**
+ * @id: Fence context ID.
+ *
+ * Allocated with dma_fence_context_alloc().
+ */
+ u64 id;
+
+ /** @seqno: Sequence number of the last initialized fence. */
+ atomic64_t seqno;
+
+ /**
+ * @in_flight_jobs: List containing all in-flight jobs.
+ *
+ * Used to keep track of in-flight jobs so we can signal
+ * panthor_job::done_fence when the synchronization object attached
+ * to the queue is signaled.
+ */
+ struct list_head in_flight_jobs;
+ } fence_ctx;
+};
+
+/**
+ * enum panthor_group_state - Scheduling group state.
+ */
+enum panthor_group_state {
+ /** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
+ PANTHOR_CS_GROUP_CREATED,
+
+ /** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
+ PANTHOR_CS_GROUP_ACTIVE,
+
+ /**
+ * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
+ * inactive/suspended right now.
+ */
+ PANTHOR_CS_GROUP_SUSPENDED,
+
+ /**
+ * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
+ *
+ * Can no longer be scheduled. The only allowed action is destruction.
+ */
+ PANTHOR_CS_GROUP_TERMINATED,
+};
+
+/**
+ * struct panthor_group - Scheduling group object
+ */
+struct panthor_group {
+ /** @refcount: Reference count */
+ struct kref refcount;
+
+ /** @ptdev: Device. */
+ struct panthor_device *ptdev;
+
+ /** @vm: VM bound to the group. */
+ struct panthor_vm *vm;
+
+ /** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
+ u64 compute_core_mask;
+
+ /** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
+ u64 fragment_core_mask;
+
+ /** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
+ u64 tiler_core_mask;
+
+ /** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
+ u8 max_compute_cores;
+
+ /** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
+ u8 max_fragment_cores;
+
+ /** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
+ u8 max_tiler_cores;
+
+ /** @priority: Group priority (see enum panthor_csg_priority). */
+ u8 priority;
+
+ /** @blocked_queues: Bitmask reflecting the blocked queues. */
+ u32 blocked_queues;
+
+ /** @idle_queues: Bitmask reflecting the idle queues. */
+ u32 idle_queues;
+
+ /** @fatal_lock: Lock used to protect access to fatal fields. */
+ spinlock_t fatal_lock;
+
+ /** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
+ u32 fatal_queues;
+
+ /** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
+ atomic_t tiler_oom;
+
+ /** @queue_count: Number of queues in this group. */
+ u32 queue_count;
+
+ /** @queues: Queues owned by this group. */
+ struct panthor_queue *queues[MAX_CS_PER_CSG];
+
+ /**
+ * @csg_id: ID of the FW group slot.
+ *
+ * -1 when the group is not scheduled/active.
+ */
+ int csg_id;
+
+ /**
+ * @destroyed: True when the group has been destroyed.
+ *
+ * If a group is destroyed it becomes useless: no further jobs can be submitted
+ * to its queues. We simply wait for all references to be dropped so we can
+ * release the group object.
+ */
+ bool destroyed;
+
+ /**
+ * @timedout: True when a timeout occurred on any of the queues owned by
+ * this group.
+ *
+ * Timeouts can be reported by drm_sched or by the FW. In any case, any
+ * timeout situation is unrecoverable, and the group becomes useless.
+ * We simply wait for all references to be dropped so we can release the
+ * group object.
+ */
+ bool timedout;
+
+ /**
+ * @syncobjs: Pool of per-queue synchronization objects.
+ *
+ * One sync object per queue. The position of the sync object is
+ * determined by the queue index.
+ */
+ struct panthor_kernel_bo *syncobjs;
+
+ /** @state: Group state. */
+ enum panthor_group_state state;
+
+ /**
+ * @suspend_buf: Suspend buffer.
+ *
+ * Stores the state of the group and its queues when a group is suspended.
+ * Used at resume time to restore the group in its previous state.
+ *
+ * The size of the suspend buffer is exposed through the FW interface.
+ */
+ struct panthor_kernel_bo *suspend_buf;
+
+ /**
+ * @protm_suspend_buf: Protection mode suspend buffer.
+ *
+ * Stores the state of the group and its queues when a group that's in
+ * protection mode is suspended.
+ *
+ * Used at resume time to restore the group in its previous state.
+ *
+ * The size of the protection mode suspend buffer is exposed through the
+ * FW interface.
+ */
+ struct panthor_kernel_bo *protm_suspend_buf;
+
+ /** @sync_upd_work: Work used to check/signal job fences. */
+ struct work_struct sync_upd_work;
+
+ /** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
+ struct work_struct tiler_oom_work;
+
+ /** @term_work: Work used to finish the group termination procedure. */
+ struct work_struct term_work;
+
+ /**
+ * @release_work: Work used to release group resources.
+ *
+ * We need to postpone the group release to avoid a deadlock when
+ * the last ref is released in the tick work.
+ */
+ struct work_struct release_work;
+
+ /**
+ * @run_node: Node used to insert the group in the
+ * panthor_scheduler::groups::{runnable,idle} and
+ * panthor_scheduler::reset::stopped_groups lists.
+ */
+ struct list_head run_node;
+
+ /**
+ * @wait_node: Node used to insert the group in the
+ * panthor_scheduler::groups::waiting list.
+ */
+ struct list_head wait_node;
+};
+
+/**
+ * group_queue_work() - Queue a group work
+ * @group: Group to queue the work for.
+ * @wname: Work name.
+ *
+ * Grabs a ref and queues a work item to the scheduler workqueue. If
+ * the work was already queued, we release the reference we grabbed.
+ *
+ * Work callbacks must release the reference we grabbed here.
+ */
+#define group_queue_work(group, wname) \
+ do { \
+ group_get(group); \
+ if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
+ group_put(group); \
+ } while (0)
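+
+/*
+ * Example usage (illustrative): group_queue_work(group, term) grabs a group
+ * reference and queues group->term_work; group_term_work() is then
+ * responsible for dropping that reference with group_put() when it's done.
+ */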
+
+/**
+ * sched_queue_work() - Queue a scheduler work.
+ * @sched: Scheduler object.
+ * @wname: Work name.
+ *
+ * Conditionally queues a scheduler work if no reset is pending/in-progress.
+ */
+#define sched_queue_work(sched, wname) \
+ do { \
+ if (!atomic_read(&(sched)->reset.in_progress) && \
+ !panthor_device_reset_is_pending((sched)->ptdev)) \
+ queue_work((sched)->wq, &(sched)->wname ## _work); \
+ } while (0)
+
+/**
+ * sched_queue_delayed_work() - Queue a scheduler delayed work.
+ * @sched: Scheduler object.
+ * @wname: Work name.
+ * @delay: Work delay in jiffies.
+ *
+ * Conditionally queues a scheduler delayed work if no reset is
+ * pending/in-progress.
+ */
+#define sched_queue_delayed_work(sched, wname, delay) \
+ do { \
+ if (!atomic_read(&(sched)->reset.in_progress) && \
+ !panthor_device_reset_is_pending((sched)->ptdev)) \
+ mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
+ } while (0)
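+
+/*
+ * Example usage (illustrative): sched_queue_delayed_work(sched, tick, 0)
+ * requests an immediate tick; this is how IDLE, FATAL or progress timeout
+ * events force a re-evaluation of the group <-> CSG slot assignment.
+ */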
+
+/*
+ * We currently set the maximum number of groups per file to an arbitrary
+ * low value, but this can be raised if we ever need more.
+ */
+#define MAX_GROUPS_PER_POOL 128
+
+/**
+ * struct panthor_group_pool - Group pool
+ *
+ * Each file gets assigned a group pool.
+ */
+struct panthor_group_pool {
+ /** @xa: Xarray used to manage group handles. */
+ struct xarray xa;
+};
+
+/**
+ * struct panthor_job - Used to manage a GPU job
+ */
+struct panthor_job {
+ /** @base: Inherit from drm_sched_job. */
+ struct drm_sched_job base;
+
+ /** @refcount: Reference count. */
+ struct kref refcount;
+
+ /** @group: Group of the queue this job will be pushed to. */
+ struct panthor_group *group;
+
+ /** @queue_idx: Index of the queue inside @group. */
+ u32 queue_idx;
+
+ /** @call_info: Information about the userspace command stream call. */
+ struct {
+ /** @start: GPU address of the userspace command stream. */
+ u64 start;
+
+ /** @size: Size of the userspace command stream. */
+ u32 size;
+
+ /**
+ * @latest_flush: Flush ID at the time the userspace command
+ * stream was built.
+ *
+ * Needed for the flush reduction mechanism.
+ */
+ u32 latest_flush;
+ } call_info;
+
+ /** @ringbuf: Position of this job in the ring buffer. */
+ struct {
+ /** @start: Start offset. */
+ u64 start;
+
+ /** @end: End offset. */
+ u64 end;
+ } ringbuf;
+
+ /**
+ * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
+ * list.
+ */
+ struct list_head node;
+
+ /** @done_fence: Fence signaled when the job is finished or cancelled. */
+ struct dma_fence *done_fence;
+};
+
+static void
+panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
+{
+ if (queue->syncwait.kmap) {
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
+
+ drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
+ queue->syncwait.kmap = NULL;
+ }
+
+ drm_gem_object_put(queue->syncwait.obj);
+ queue->syncwait.obj = NULL;
+}
+
+static void *
+panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_gem_object *bo;
+ struct iosys_map map;
+ int ret;
+
+ if (queue->syncwait.kmap)
+ return queue->syncwait.kmap + queue->syncwait.offset;
+
+ bo = panthor_vm_get_bo_for_va(group->vm,
+ queue->syncwait.gpu_va,
+ &queue->syncwait.offset);
+ if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
+ goto err_put_syncwait_obj;
+
+ queue->syncwait.obj = &bo->base.base;
+ ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
+ if (drm_WARN_ON(&ptdev->base, ret))
+ goto err_put_syncwait_obj;
+
+ queue->syncwait.kmap = map.vaddr;
+ if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
+ goto err_put_syncwait_obj;
+
+ return queue->syncwait.kmap + queue->syncwait.offset;
+
+err_put_syncwait_obj:
+ panthor_queue_put_syncwait_obj(queue);
+ return NULL;
+}
+
+static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
+{
+ if (IS_ERR_OR_NULL(queue))
+ return;
+
+ if (queue->entity.fence_context)
+ drm_sched_entity_destroy(&queue->entity);
+
+ if (queue->scheduler.ops)
+ drm_sched_fini(&queue->scheduler);
+
+ panthor_queue_put_syncwait_obj(queue);
+
+ panthor_kernel_bo_destroy(group->vm, queue->ringbuf);
+ panthor_kernel_bo_destroy(panthor_fw_vm(group->ptdev), queue->iface.mem);
+
+ kfree(queue);
+}
+
+static void group_release_work(struct work_struct *work)
+{
+ struct panthor_group *group = container_of(work,
+ struct panthor_group,
+ release_work);
+ struct panthor_device *ptdev = group->ptdev;
+ u32 i;
+
+ for (i = 0; i < group->queue_count; i++)
+ group_free_queue(group, group->queues[i]);
+
+ panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->suspend_buf);
+ panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->protm_suspend_buf);
+ panthor_kernel_bo_destroy(group->vm, group->syncobjs);
+
+ panthor_vm_put(group->vm);
+ kfree(group);
+}
+
+static void group_release(struct kref *kref)
+{
+ struct panthor_group *group = container_of(kref,
+ struct panthor_group,
+ refcount);
+ struct panthor_device *ptdev = group->ptdev;
+
+ drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
+ drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
+ drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
+
+ queue_work(panthor_cleanup_wq, &group->release_work);
+}
+
+static void group_put(struct panthor_group *group)
+{
+ if (group)
+ kref_put(&group->refcount, group_release);
+}
+
+static struct panthor_group *
+group_get(struct panthor_group *group)
+{
+ if (group)
+ kref_get(&group->refcount);
+
+ return group;
+}
+
+/**
+ * group_bind_locked() - Bind a group to a group slot
+ * @group: Group.
+ * @csg_id: Slot.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+group_bind_locked(struct panthor_group *group, u32 csg_id)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_csg_slot *csg_slot;
+ int ret;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
+ ptdev->scheduler->csg_slots[csg_id].group))
+ return -EINVAL;
+
+ ret = panthor_vm_active(group->vm);
+ if (ret)
+ return ret;
+
+ csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ group_get(group);
+ group->csg_id = csg_id;
+
+ /* Dummy doorbell allocation: doorbell is assigned to the group and
+ * all queues use the same doorbell.
+ *
+ * TODO: Implement LRU-based doorbell assignment, so the most often
+ * updated queues get their own doorbell, thus avoiding useless checks
+ * on queues belonging to the same group that are rarely updated.
+ */
+ for (u32 i = 0; i < group->queue_count; i++)
+ group->queues[i]->doorbell_id = csg_id + 1;
+
+ csg_slot->group = group;
+
+ return 0;
+}
+
+/**
+ * group_unbind_locked() - Unbind a group from a slot.
+ * @group: Group to unbind.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+group_unbind_locked(struct panthor_group *group)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_csg_slot *slot;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
+ return -EINVAL;
+
+ if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
+ return -EINVAL;
+
+ slot = &ptdev->scheduler->csg_slots[group->csg_id];
+ panthor_vm_idle(group->vm);
+ group->csg_id = -1;
+
+ /* Tiler OOM events will be re-issued next time the group is scheduled. */
+ atomic_set(&group->tiler_oom, 0);
+ cancel_work(&group->tiler_oom_work);
+
+ for (u32 i = 0; i < group->queue_count; i++)
+ group->queues[i]->doorbell_id = -1;
+
+ slot->group = NULL;
+
+ group_put(group);
+ return 0;
+}
+
+/**
+ * cs_slot_prog_locked() - Program a queue slot
+ * @ptdev: Device.
+ * @csg_id: Group slot ID.
+ * @cs_id: Queue slot ID.
+ *
+ * Program a queue slot with the queue information so things can start being
+ * executed on this queue.
+ *
+ * The group slot must have a group bound to it already (group_bind_locked()).
+ */
+static void
+cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
+{
+ struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
+ struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
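+ /* Resync the input extract pointer with the FW-owned output copy
+ * before (re)starting the queue.
+ */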
+ queue->iface.input->extract = queue->iface.output->extract;
+ drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
+
+ cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
+ cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
+ cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
+ cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
+ cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
+ CS_CONFIG_DOORBELL(queue->doorbell_id);
+ cs_iface->input->ack_irq_mask = ~0;
+ panthor_fw_update_reqs(cs_iface, req,
+ CS_IDLE_SYNC_WAIT |
+ CS_IDLE_EMPTY |
+ CS_STATE_START |
+ CS_EXTRACT_EVENT,
+ CS_IDLE_SYNC_WAIT |
+ CS_IDLE_EMPTY |
+ CS_STATE_MASK |
+ CS_EXTRACT_EVENT);
+ if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
+ drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
+ queue->timeout_suspended = false;
+ }
+}
+
+/**
+ * cs_slot_reset_locked() - Reset a queue slot
+ * @ptdev: Device.
+ * @csg_id: Group slot.
+ * @cs_id: Queue slot.
+ *
+ * Change the queue slot state to STOP and suspend the queue timeout if
+ * the queue is not blocked.
+ *
+ * The group slot must have a group bound to it (group_bind_locked()).
+ */
+static int
+cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
+{
+ struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
+ struct panthor_queue *queue = group->queues[cs_id];
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ panthor_fw_update_reqs(cs_iface, req,
+ CS_STATE_STOP,
+ CS_STATE_MASK);
+
+ /* If the queue is blocked, we want to keep the timeout running, so
+ * we can detect unbounded waits and kill the group when that happens.
+ */
+ if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
+ queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
+ queue->timeout_suspended = true;
+ WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
+ }
+
+ return 0;
+}
+
+/**
+ * csg_slot_sync_priority_locked() - Synchronize the group slot priority
+ * @ptdev: Device.
+ * @csg_id: Group slot ID.
+ *
+ * Group slot priority updates happen asynchronously. When we receive a
+ * %CSG_ENDPOINT_CONFIG acknowledgment, we know the update is effective,
+ * and can reflect it to our panthor_csg_slot object.
+ */
+static void
+csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_fw_csg_iface *csg_iface;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
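+ /* The CSG slot priority lives in the top 4 bits ([31:28]) of endpoint_req. */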
+ csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
+}
+
+/**
+ * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
+ * @ptdev: Device.
+ * @csg_id: Group slot.
+ * @cs_id: Queue slot.
+ *
+ * Queue state is updated on group suspend or STATUS_UPDATE event.
+ */
+static void
+cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
+{
+ struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
+ struct panthor_queue *queue = group->queues[cs_id];
+ struct panthor_fw_cs_iface *cs_iface =
+ panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
+
+ u32 status_wait_cond;
+
+ switch (cs_iface->output->status_blocked_reason) {
+ case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
+ if (queue->iface.input->insert == queue->iface.output->extract &&
+ cs_iface->output->status_scoreboards == 0)
+ group->idle_queues |= BIT(cs_id);
+ break;
+
+ case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
+ if (list_empty(&group->wait_node)) {
+ list_move_tail(&group->wait_node,
+ &group->ptdev->scheduler->groups.waiting);
+ }
+ group->blocked_queues |= BIT(cs_id);
+ queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
+ queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
+ status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
+ queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
+ if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
+ u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;
+
+ queue->syncwait.sync64 = true;
+ queue->syncwait.ref |= sync_val_hi << 32;
+ } else {
+ queue->syncwait.sync64 = false;
+ }
+ break;
+
+ default:
+ /* Other reasons are not blocking. Consider the queue as runnable
+ * in those cases.
+ */
+ break;
+ }
+}
+
+static void
+csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+ u32 i;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ group->idle_queues = 0;
+ group->blocked_queues = 0;
+
+ for (i = 0; i < group->queue_count; i++) {
+ if (group->queues[i])
+ cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
+ }
+}
+
+static void
+csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_fw_csg_iface *csg_iface;
+ struct panthor_group *group;
+ enum panthor_group_state new_state, old_state;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ group = csg_slot->group;
+
+ if (!group)
+ return;
+
+ old_state = group->state;
+ switch (csg_iface->output->ack & CSG_STATE_MASK) {
+ case CSG_STATE_START:
+ case CSG_STATE_RESUME:
+ new_state = PANTHOR_CS_GROUP_ACTIVE;
+ break;
+ case CSG_STATE_TERMINATE:
+ new_state = PANTHOR_CS_GROUP_TERMINATED;
+ break;
+ case CSG_STATE_SUSPEND:
+ new_state = PANTHOR_CS_GROUP_SUSPENDED;
+ break;
+ default:
+ /* Unknown state: keep the current one. */
+ new_state = old_state;
+ break;
+ }
+
+ if (old_state == new_state)
+ return;
+
+ if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
+ csg_slot_sync_queues_state_locked(ptdev, csg_id);
+
+ if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
+ u32 i;
+
+ /* Reset the queue slots so we start from a clean
+ * state when starting/resuming a new group on this
+ * CSG slot. No wait needed here, and no doorbell
+ * ring either, since the CS slot will only be re-used
+ * on the next CSG start operation.
+ */
+ for (i = 0; i < group->queue_count; i++) {
+ if (group->queues[i])
+ cs_slot_reset_locked(ptdev, csg_id, i);
+ }
+ }
+
+ group->state = new_state;
+}
+
+static int
+csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
+{
+ struct panthor_fw_csg_iface *csg_iface;
+ struct panthor_csg_slot *csg_slot;
+ struct panthor_group *group;
+ u32 queue_mask = 0, i;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (priority > MAX_CSG_PRIO)
+ return -EINVAL;
+
+ if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
+ return -EINVAL;
+
+ csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ group = csg_slot->group;
+ if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
+ return 0;
+
+ csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
+
+ for (i = 0; i < group->queue_count; i++) {
+ if (group->queues[i]) {
+ cs_slot_prog_locked(ptdev, csg_id, i);
+ queue_mask |= BIT(i);
+ }
+ }
+
+ csg_iface->input->allow_compute = group->compute_core_mask;
+ csg_iface->input->allow_fragment = group->fragment_core_mask;
+ csg_iface->input->allow_other = group->tiler_core_mask;
+ csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
+ CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
+ CSG_EP_REQ_TILER(group->max_tiler_cores) |
+ CSG_EP_REQ_PRIORITY(priority);
+ csg_iface->input->config = panthor_vm_as(group->vm);
+
+ if (group->suspend_buf)
+ csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
+ else
+ csg_iface->input->suspend_buf = 0;
+
+ if (group->protm_suspend_buf) {
+ csg_iface->input->protm_suspend_buf =
+ panthor_kernel_bo_gpuva(group->protm_suspend_buf);
+ } else {
+ csg_iface->input->protm_suspend_buf = 0;
+ }
+
+ csg_iface->input->ack_irq_mask = ~0;
+ panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
+ return 0;
+}
+
+static void
+cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
+ u32 csg_id, u32 cs_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+ struct panthor_fw_cs_iface *cs_iface;
+ u32 fatal;
+ u64 info;
+
+ lockdep_assert_held(&sched->lock);
+
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ fatal = cs_iface->output->fatal;
+ info = cs_iface->output->fatal_info;
+
+ if (group)
+ group->fatal_queues |= BIT(cs_id);
+
+ sched_queue_delayed_work(sched, tick, 0);
+ drm_warn(&ptdev->base,
+ "CSG slot %d CS slot: %d\n"
+ "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
+ "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
+ "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
+ csg_id, cs_id,
+ (unsigned int)CS_EXCEPTION_TYPE(fatal),
+ panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
+ (unsigned int)CS_EXCEPTION_DATA(fatal),
+ info);
+}
+
+static void
+cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
+ u32 csg_id, u32 cs_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+ struct panthor_queue *queue = group && cs_id < group->queue_count ?
+ group->queues[cs_id] : NULL;
+ struct panthor_fw_cs_iface *cs_iface;
+ u32 fault;
+ u64 info;
+
+ lockdep_assert_held(&sched->lock);
+
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ fault = cs_iface->output->fault;
+ info = cs_iface->output->fault_info;
+
+ if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
+ u64 cs_extract = queue->iface.output->extract;
+ struct panthor_job *job;
+
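+ /* Flag the job the CS extract pointer currently sits in: jobs
+ * that were fully extracted already are skipped, and we stop at
+ * the first job that hasn't been started yet.
+ */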
+ spin_lock(&queue->fence_ctx.lock);
+ list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
+ if (cs_extract >= job->ringbuf.end)
+ continue;
+
+ if (cs_extract < job->ringbuf.start)
+ break;
+
+ dma_fence_set_error(job->done_fence, -EINVAL);
+ }
+ spin_unlock(&queue->fence_ctx.lock);
+ }
+
+ drm_warn(&ptdev->base,
+ "CSG slot %d CS slot: %d\n"
+ "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
+ "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
+ "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
+ csg_id, cs_id,
+ (unsigned int)CS_EXCEPTION_TYPE(fault),
+ panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
+ (unsigned int)CS_EXCEPTION_DATA(fault),
+ info);
+}
+
+static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ u32 renderpasses_in_flight, pending_frag_count;
+ struct panthor_heap_pool *heaps = NULL;
+ u64 heap_address, new_chunk_va = 0;
+ u32 vt_start, vt_end, frag_end;
+ int ret, csg_id;
+
+ mutex_lock(&sched->lock);
+ csg_id = group->csg_id;
+ if (csg_id >= 0) {
+ struct panthor_fw_cs_iface *cs_iface;
+
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ heaps = panthor_vm_get_heap_pool(group->vm, false);
+ heap_address = cs_iface->output->heap_address;
+ vt_start = cs_iface->output->heap_vt_start;
+ vt_end = cs_iface->output->heap_vt_end;
+ frag_end = cs_iface->output->heap_frag_end;
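+ /* heap_vt_{start,end} and heap_frag_end are monotonic render
+ * pass counters: the deltas give the number of passes with
+ * vertex/tiler work started but fragment work not yet done,
+ * and the number of passes with fragment work still pending.
+ */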
+ renderpasses_in_flight = vt_start - frag_end;
+ pending_frag_count = vt_end - frag_end;
+ }
+ mutex_unlock(&sched->lock);
+
+ /* The group got scheduled out, we stop here. We will get a new tiler OOM event
+ * when it's scheduled again.
+ */
+ if (unlikely(csg_id < 0))
+ return 0;
+
+ if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
+ ret = -EINVAL;
+ } else {
+ /* We do the allocation without holding the scheduler lock to avoid
+ * blocking the scheduling.
+ */
+ ret = panthor_heap_grow(heaps, heap_address,
+ renderpasses_in_flight,
+ pending_frag_count, &new_chunk_va);
+ }
+
+ if (ret && ret != -EBUSY) {
+ drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
+ group->fatal_queues |= BIT(cs_id);
+ sched_queue_delayed_work(sched, tick, 0);
+ goto out_put_heap_pool;
+ }
+
+ mutex_lock(&sched->lock);
+ csg_id = group->csg_id;
+ if (csg_id >= 0) {
+ struct panthor_fw_csg_iface *csg_iface;
+ struct panthor_fw_cs_iface *cs_iface;
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+
+ cs_iface->input->heap_start = new_chunk_va;
+ cs_iface->input->heap_end = new_chunk_va;
+ panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
+ panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
+ panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
+ }
+ mutex_unlock(&sched->lock);
+
+ /* We allocated a chunk, but couldn't link it to the heap
+ * context because the group was scheduled out while we were
+ * allocating memory. We need to return this chunk to the heap.
+ */
+ if (unlikely(csg_id < 0 && new_chunk_va))
+ panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);
+
+ ret = 0;
+
+out_put_heap_pool:
+ panthor_heap_pool_put(heaps);
+ return ret;
+}
+
+static void group_tiler_oom_work(struct work_struct *work)
+{
+ struct panthor_group *group =
+ container_of(work, struct panthor_group, tiler_oom_work);
+ u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
+
+ while (tiler_oom) {
+ u32 cs_id = ffs(tiler_oom) - 1;
+
+ group_process_tiler_oom(group, cs_id);
+ tiler_oom &= ~BIT(cs_id);
+ }
+
+ group_put(group);
+}
+
+static void
+cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
+ u32 csg_id, u32 cs_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+
+ lockdep_assert_held(&sched->lock);
+
+ if (drm_WARN_ON(&ptdev->base, !group))
+ return;
+
+ atomic_or(BIT(cs_id), &group->tiler_oom);
+
+ /* We don't use group_queue_work() here because we want to queue the
+ * work item to the heap_alloc_wq.
+ */
+ group_get(group);
+ if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
+ group_put(group);
+}
+
+static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
+ u32 csg_id, u32 cs_id)
+{
+ struct panthor_fw_cs_iface *cs_iface;
+ u32 req, ack, events;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
+ req = cs_iface->input->req;
+ ack = cs_iface->output->ack;
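+ /* Pending events are the bits where REQ and ACK disagree. */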
+ events = (req ^ ack) & CS_EVT_MASK;
+
+ if (events & CS_FATAL)
+ cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
+
+ if (events & CS_FAULT)
+ cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
+
+ if (events & CS_TILER_OOM)
+ cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
+
+ /* We don't acknowledge the TILER_OOM event since its handling is
+ * deferred to a separate work.
+ */
+ panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);
+
+ return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
+}
+
+static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_fw_csg_iface *csg_iface;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
+}
+
+static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+
+ lockdep_assert_held(&sched->lock);
+
+ sched->might_have_idle_groups = true;
+
+ /* Schedule a tick so we can evict idle groups and schedule non-idle
+ * ones. This will also update runtime PM and devfreq busy/idle states,
+ * so the device can lower its frequency or get suspended.
+ */
+ sched_queue_delayed_work(sched, tick, 0);
+}
+
+static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
+ u32 csg_id)
+{
+ struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (group)
+ group_queue_work(group, sync_upd);
+
+ sched_queue_work(ptdev->scheduler, sync_upd);
+}
+
+static void
+csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
+
+ lockdep_assert_held(&sched->lock);
+
+ drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
+
+ if (!drm_WARN_ON(&ptdev->base, !group))
+ group->timedout = true;
+
+ sched_queue_delayed_work(sched, tick, 0);
+}
+
+static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
+{
+ u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
+ struct panthor_fw_csg_iface *csg_iface;
+ u32 ring_cs_db_mask = 0;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
+ return;
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ req = READ_ONCE(csg_iface->input->req);
+ ack = READ_ONCE(csg_iface->output->ack);
+ cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
+ cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
+ csg_events = (req ^ ack) & CSG_EVT_MASK;
+
+ /* There may not be any pending CSG/CS interrupts to process */
+ if (req == ack && cs_irq_req == cs_irq_ack)
+ return;
+
+ /* Immediately set IRQ_ACK bits to be same as the IRQ_REQ bits before
+ * examining the CS_ACK & CS_REQ bits. This ensures that the host
+ * doesn't miss an interrupt for a CS in the race scenario where,
+ * while the host is servicing an interrupt for that CS, the firmware
+ * sends another interrupt for it.
+ */
+ csg_iface->input->cs_irq_ack = cs_irq_req;
+
+ panthor_fw_update_reqs(csg_iface, req, ack,
+ CSG_SYNC_UPDATE |
+ CSG_IDLE |
+ CSG_PROGRESS_TIMER_EVENT);
+
+ if (csg_events & CSG_IDLE)
+ csg_slot_process_idle_event_locked(ptdev, csg_id);
+
+ if (csg_events & CSG_PROGRESS_TIMER_EVENT)
+ csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
+
+ cs_irqs = cs_irq_req ^ cs_irq_ack;
+ while (cs_irqs) {
+ u32 cs_id = ffs(cs_irqs) - 1;
+
+ if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
+ ring_cs_db_mask |= BIT(cs_id);
+
+ cs_irqs &= ~BIT(cs_id);
+ }
+
+ if (csg_events & CSG_SYNC_UPDATE)
+ csg_slot_sync_update_locked(ptdev, csg_id);
+
+ if (ring_cs_db_mask)
+ panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);
+
+ panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
+}
+
+static void sched_process_idle_event_locked(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ /* Acknowledge the idle event and schedule a tick. */
+ panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
+ sched_queue_delayed_work(ptdev->scheduler, tick, 0);
+}
+
+/**
+ * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
+ * @ptdev: Device.
+ */
+static void sched_process_global_irq_locked(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ u32 req, ack, evts;
+
+ lockdep_assert_held(&ptdev->scheduler->lock);
+
+ req = READ_ONCE(glb_iface->input->req);
+ ack = READ_ONCE(glb_iface->output->ack);
+ evts = (req ^ ack) & GLB_EVT_MASK;
+
+ if (evts & GLB_IDLE)
+ sched_process_idle_event_locked(ptdev);
+}
+
+static void process_fw_events_work(struct work_struct *work)
+{
+ struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
+ fw_events_work);
+ u32 events = atomic_xchg(&sched->fw_events, 0);
+ struct panthor_device *ptdev = sched->ptdev;
+
+ mutex_lock(&sched->lock);
+
+ if (events & JOB_INT_GLOBAL_IF) {
+ sched_process_global_irq_locked(ptdev);
+ events &= ~JOB_INT_GLOBAL_IF;
+ }
+
+ while (events) {
+ u32 csg_id = ffs(events) - 1;
+
+ sched_process_csg_irq_locked(ptdev, csg_id);
+ events &= ~BIT(csg_id);
+ }
+
+ mutex_unlock(&sched->lock);
+}
+
+/**
+ * panthor_sched_report_fw_events() - Report FW events to the scheduler.
+ * @ptdev: Device.
+ * @events: Bitmask of FW events to report.
+ */
+void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
+{
+ if (!ptdev->scheduler)
+ return;
+
+ atomic_or(events, &ptdev->scheduler->fw_events);
+ sched_queue_work(ptdev->scheduler, fw_events);
+}
+
+static const char *fence_get_driver_name(struct dma_fence *fence)
+{
+ return "panthor";
+}
+
+static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "queue-fence";
+}
+
+static const struct dma_fence_ops panthor_queue_fence_ops = {
+ .get_driver_name = fence_get_driver_name,
+ .get_timeline_name = queue_fence_get_timeline_name,
+};
+
+struct panthor_csg_slots_upd_ctx {
+ u32 update_mask;
+ u32 timedout_mask;
+ struct {
+ u32 value;
+ u32 mask;
+ } requests[MAX_CSGS];
+};
+
+static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
+ struct panthor_csg_slots_upd_ctx *ctx,
+ u32 csg_id, u32 value, u32 mask)
+{
+ if (drm_WARN_ON(&ptdev->base, !mask) ||
+ drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
+ return;
+
+ ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
+ ctx->requests[csg_id].mask |= mask;
+ ctx->update_mask |= BIT(csg_id);
+}
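+
+/*
+ * Typical usage (illustrative, not a fixed API contract): batch all slot
+ * updates and flush them with a single doorbell ring:
+ *
+ * csgs_upd_ctx_init(&upd_ctx);
+ * csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ * CSG_STATE_SUSPEND, CSG_STATE_MASK);
+ * ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+ */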
+
+static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
+ struct panthor_csg_slots_upd_ctx *ctx)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ u32 update_slots = ctx->update_mask;
+
+ lockdep_assert_held(&sched->lock);
+
+ if (!ctx->update_mask)
+ return 0;
+
+ while (update_slots) {
+ struct panthor_fw_csg_iface *csg_iface;
+ u32 csg_id = ffs(update_slots) - 1;
+
+ update_slots &= ~BIT(csg_id);
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ panthor_fw_update_reqs(csg_iface, req,
+ ctx->requests[csg_id].value,
+ ctx->requests[csg_id].mask);
+ }
+
+ panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
+
+ update_slots = ctx->update_mask;
+ while (update_slots) {
+ struct panthor_fw_csg_iface *csg_iface;
+ u32 csg_id = ffs(update_slots) - 1;
+ u32 req_mask = ctx->requests[csg_id].mask, acked;
+ int ret;
+
+ update_slots &= ~BIT(csg_id);
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+
+ ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
+
+ if (acked & CSG_ENDPOINT_CONFIG)
+ csg_slot_sync_priority_locked(ptdev, csg_id);
+
+ if (acked & CSG_STATE_MASK)
+ csg_slot_sync_state_locked(ptdev, csg_id);
+
+ if (acked & CSG_STATUS_UPDATE) {
+ csg_slot_sync_queues_state_locked(ptdev, csg_id);
+ csg_slot_sync_idle_state_locked(ptdev, csg_id);
+ }
+
+ if (ret && acked != req_mask &&
+ ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
+ drm_err(&ptdev->base, "CSG %d update request timed out", csg_id);
+ ctx->timedout_mask |= BIT(csg_id);
+ }
+ }
+
+ if (ctx->timedout_mask)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+struct panthor_sched_tick_ctx {
+ struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];
+ struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
+ u32 idle_group_count;
+ u32 group_count;
+ enum panthor_csg_priority min_priority;
+ struct panthor_vm *vms[MAX_CS_PER_CSG];
+ u32 as_count;
+ bool immediate_tick;
+ u32 csg_upd_failed_mask;
+};
+
+static bool
+tick_ctx_is_full(const struct panthor_scheduler *sched,
+ const struct panthor_sched_tick_ctx *ctx)
+{
+ return ctx->group_count == sched->csg_slot_count;
+}
+
+static bool
+group_is_idle(struct panthor_group *group)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ u32 inactive_queues;
+
+ if (group->csg_id >= 0)
+ return ptdev->scheduler->csg_slots[group->csg_id].idle;
+
+ inactive_queues = group->idle_queues | group->blocked_queues;
+ return hweight32(inactive_queues) == group->queue_count;
+}
+
+static bool
+group_can_run(struct panthor_group *group)
+{
+ return group->state != PANTHOR_CS_GROUP_TERMINATED &&
+ !group->destroyed && group->fatal_queues == 0 &&
+ !group->timedout;
+}
+
+static void
+tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
+ struct panthor_sched_tick_ctx *ctx,
+ struct list_head *queue,
+ bool skip_idle_groups,
+ bool owned_by_tick_ctx)
+{
+ struct panthor_group *group, *tmp;
+
+ if (tick_ctx_is_full(sched, ctx))
+ return;
+
+ list_for_each_entry_safe(group, tmp, queue, run_node) {
+ u32 i;
+
+ if (!group_can_run(group))
+ continue;
+
+ if (skip_idle_groups && group_is_idle(group))
+ continue;
+
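+ /* Check whether the group's VM already has an AS (address space)
+ * slot reserved in this tick context; if it doesn't and all AS
+ * slots are taken, skip the group for now.
+ */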
+ for (i = 0; i < ctx->as_count; i++) {
+ if (ctx->vms[i] == group->vm)
+ break;
+ }
+
+ if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
+ continue;
+
+ if (!owned_by_tick_ctx)
+ group_get(group);
+
+ list_move_tail(&group->run_node, &ctx->groups[group->priority]);
+ ctx->group_count++;
+ if (group_is_idle(group))
+ ctx->idle_group_count++;
+
+ if (i == ctx->as_count)
+ ctx->vms[ctx->as_count++] = group->vm;
+
+ if (ctx->min_priority > group->priority)
+ ctx->min_priority = group->priority;
+
+ if (tick_ctx_is_full(sched, ctx))
+ return;
+ }
+}
+
+static void
+tick_ctx_insert_old_group(struct panthor_scheduler *sched,
+ struct panthor_sched_tick_ctx *ctx,
+ struct panthor_group *group,
+ bool full_tick)
+{
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
+ struct panthor_group *other_group;
+
+ if (!full_tick) {
+ list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
+ return;
+ }
+
+ /* Rotate to make sure groups with lower CSG slot
+ * priorities have a chance to get a higher CSG slot
+ * priority next time they get picked. This priority
+ * has an impact on resource request ordering, so it's
+ * important to make sure we don't let one group starve
+ * all other groups with the same group priority.
+ */
+ list_for_each_entry(other_group,
+ &ctx->old_groups[csg_slot->group->priority],
+ run_node) {
+ struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
+
+ if (other_csg_slot->priority > csg_slot->priority) {
+ list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
+ return;
+ }
+ }
+
+ list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
+}
+
+static void
+tick_ctx_init(struct panthor_scheduler *sched,
+ struct panthor_sched_tick_ctx *ctx,
+ bool full_tick)
+{
+ struct panthor_device *ptdev = sched->ptdev;
+ struct panthor_csg_slots_upd_ctx upd_ctx;
+ int ret;
+ u32 i;
+
+ memset(ctx, 0, sizeof(*ctx));
+ csgs_upd_ctx_init(&upd_ctx);
+
+ ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
+ for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
+ INIT_LIST_HEAD(&ctx->groups[i]);
+ INIT_LIST_HEAD(&ctx->old_groups[i]);
+ }
+
+ for (i = 0; i < sched->csg_slot_count; i++) {
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
+ struct panthor_group *group = csg_slot->group;
+ struct panthor_fw_csg_iface *csg_iface;
+
+ if (!group)
+ continue;
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, i);
+ group_get(group);
+
+ /* If there were unhandled faults on the VM, force processing of
+ * CSG IRQs, so we can flag the faulty queue.
+ */
+ if (panthor_vm_has_unhandled_faults(group->vm)) {
+ sched_process_csg_irq_locked(ptdev, i);
+
+ /* No fatal fault reported, flag all queues as faulty. */
+ if (!group->fatal_queues)
+ group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
+ }
+
+ tick_ctx_insert_old_group(sched, ctx, group, full_tick);
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
+ csg_iface->output->ack ^ CSG_STATUS_UPDATE,
+ CSG_STATUS_UPDATE);
+ }
+
+ ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
+ }
+}
+
+#define NUM_INSTRS_PER_SLOT 16
+
+static void
+group_term_post_processing(struct panthor_group *group)
+{
+ struct panthor_job *job, *tmp;
+ LIST_HEAD(faulty_jobs);
+ bool cookie;
+ u32 i = 0;
+
+ if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
+ return;
+
+ cookie = dma_fence_begin_signalling();
+ for (i = 0; i < group->queue_count; i++) {
+ struct panthor_queue *queue = group->queues[i];
+ struct panthor_syncobj_64b *syncobj;
+ int err;
+
+ if (group->fatal_queues & BIT(i))
+ err = -EINVAL;
+ else if (group->timedout)
+ err = -ETIMEDOUT;
+ else
+ err = -ECANCELED;
+
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->fence_ctx.lock);
+ list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
+ list_move_tail(&job->node, &faulty_jobs);
+ dma_fence_set_error(job->done_fence, err);
+ dma_fence_signal_locked(job->done_fence);
+ }
+ spin_unlock(&queue->fence_ctx.lock);
+
+ /* Manually update the syncobj seqno to unblock waiters. */
+ syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
+ syncobj->status = ~0;
+ syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
+ sched_queue_work(group->ptdev->scheduler, sync_upd);
+ }
+ dma_fence_end_signalling(cookie);
+
+ list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
+ list_del_init(&job->node);
+ panthor_job_put(&job->base);
+ }
+}
+
+static void group_term_work(struct work_struct *work)
+{
+ struct panthor_group *group =
+ container_of(work, struct panthor_group, term_work);
+
+ group_term_post_processing(group);
+ group_put(group);
+}
+
+static void
+tick_ctx_cleanup(struct panthor_scheduler *sched,
+ struct panthor_sched_tick_ctx *ctx)
+{
+ struct panthor_group *group, *tmp;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
+ list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
+ /* If everything went fine, we should only have groups
+ * to be terminated in the old_groups lists.
+ */
+ drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
+ group_can_run(group));
+
+ if (!group_can_run(group)) {
+ list_del_init(&group->run_node);
+ list_del_init(&group->wait_node);
+ group_queue_work(group, term);
+ } else if (group->csg_id >= 0) {
+ list_del_init(&group->run_node);
+ } else {
+ list_move(&group->run_node,
+ group_is_idle(group) ?
+ &sched->groups.idle[group->priority] :
+ &sched->groups.runnable[group->priority]);
+ }
+ group_put(group);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
+ /* If everything went fine, the groups to schedule lists should
+ * be empty.
+ */
+ drm_WARN_ON(&sched->ptdev->base,
+ !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
+
+ list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
+ if (group->csg_id >= 0) {
+ list_del_init(&group->run_node);
+ } else {
+ list_move(&group->run_node,
+ group_is_idle(group) ?
+ &sched->groups.idle[group->priority] :
+ &sched->groups.runnable[group->priority]);
+ }
+ group_put(group);
+ }
+ }
+}
+
+static void
+tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
+{
+ struct panthor_group *group, *tmp;
+ struct panthor_device *ptdev = sched->ptdev;
+ struct panthor_csg_slot *csg_slot;
+ int prio, new_csg_prio = MAX_CSG_PRIO, i;
+ u32 free_csg_slots = 0;
+ struct panthor_csg_slots_upd_ctx upd_ctx;
+ int ret;
+
+ csgs_upd_ctx_init(&upd_ctx);
+
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ /* Suspend or terminate evicted groups. */
+ list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
+ bool term = !group_can_run(group);
+ int csg_id = group->csg_id;
+
+ if (drm_WARN_ON(&ptdev->base, csg_id < 0))
+ continue;
+
+ csg_slot = &sched->csg_slots[csg_id];
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
+ CSG_STATE_MASK);
+ }
+
+ /* Update priorities on already running groups. */
+ list_for_each_entry(group, &ctx->groups[prio], run_node) {
+ struct panthor_fw_csg_iface *csg_iface;
+ int csg_id = group->csg_id;
+
+ if (csg_id < 0) {
+ new_csg_prio--;
+ continue;
+ }
+
+ csg_slot = &sched->csg_slots[csg_id];
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ if (csg_slot->priority == new_csg_prio) {
+ new_csg_prio--;
+ continue;
+ }
+
+ panthor_fw_update_reqs(csg_iface, endpoint_req,
+ CSG_EP_REQ_PRIORITY(new_csg_prio),
+ CSG_EP_REQ_PRIORITY_MASK);
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
+ CSG_ENDPOINT_CONFIG);
+ new_csg_prio--;
+ }
+ }
+
+ ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
+ return;
+ }
+
+ /* Unbind evicted groups. */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
+ /* This group is gone. Process its pending interrupts now,
+ * so nothing is left dangling before we start a new group
+ * on this slot.
+ */
+ if (group->csg_id >= 0)
+ sched_process_csg_irq_locked(ptdev, group->csg_id);
+
+ group_unbind_locked(group);
+ }
+ }
+
+ for (i = 0; i < sched->csg_slot_count; i++) {
+ if (!sched->csg_slots[i].group)
+ free_csg_slots |= BIT(i);
+ }
+
+ csgs_upd_ctx_init(&upd_ctx);
+ new_csg_prio = MAX_CSG_PRIO;
+
+ /* Start new groups. */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ list_for_each_entry(group, &ctx->groups[prio], run_node) {
+ int csg_id = group->csg_id;
+ struct panthor_fw_csg_iface *csg_iface;
+
+ if (csg_id >= 0) {
+ new_csg_prio--;
+ continue;
+ }
+
+ csg_id = ffs(free_csg_slots) - 1;
+ if (drm_WARN_ON(&ptdev->base, csg_id < 0))
+ break;
+
+ csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
+ csg_slot = &sched->csg_slots[csg_id];
+ group_bind_locked(group, csg_id);
+ csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ group->state == PANTHOR_CS_GROUP_SUSPENDED ?
+ CSG_STATE_RESUME : CSG_STATE_START,
+ CSG_STATE_MASK);
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
+ CSG_ENDPOINT_CONFIG);
+ free_csg_slots &= ~BIT(csg_id);
+ }
+ }
+
+ ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+ if (ret) {
+ panthor_device_schedule_reset(ptdev);
+ ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
+ return;
+ }
+
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
+ list_del_init(&group->run_node);
+
+ /* If the group has been destroyed while we were
+ * scheduling, ask for an immediate tick to
+ * re-evaluate as soon as possible and get rid of
+ * this dangling group.
+ */
+ if (group->destroyed)
+ ctx->immediate_tick = true;
+ group_put(group);
+ }
+
+ /* Return evicted groups to the idle or run queues. Groups
+ * that can no longer be run (because they've been destroyed
+ * or experienced an unrecoverable error) will be scheduled
+ * for destruction in tick_ctx_cleanup().
+ */
+ list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
+ if (!group_can_run(group))
+ continue;
+
+ if (group_is_idle(group))
+ list_move_tail(&group->run_node, &sched->groups.idle[prio]);
+ else
+ list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
+ group_put(group);
+ }
+ }
+
+ sched->used_csg_slot_count = ctx->group_count;
+ sched->might_have_idle_groups = ctx->idle_group_count > 0;
+}
+
+static u64
+tick_ctx_update_resched_target(struct panthor_scheduler *sched,
+ const struct panthor_sched_tick_ctx *ctx)
+{
+ /* We had space left, no need to reschedule until some external event happens. */
+ if (!tick_ctx_is_full(sched, ctx))
+ goto no_tick;
+
+ /* If idle groups were scheduled, no need to wake up until some external
+ * event happens (group unblocked, new job submitted, ...).
+ */
+ if (ctx->idle_group_count)
+ goto no_tick;
+
+ if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
+ goto no_tick;
+
+ /* If there are groups of the same priority waiting, we need to
+ * keep the scheduler ticking; otherwise, we'll just wait for
+ * new groups with a higher priority to be queued.
+ */
+ if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
+ u64 resched_target = sched->last_tick + sched->tick_period;
+
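+ /* Keep the earliest valid target: adopt the new one if the
+ * current target already expired or the new one comes sooner.
+ */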
+ if (time_before64(sched->resched_target, sched->last_tick) ||
+ time_before64(resched_target, sched->resched_target))
+ sched->resched_target = resched_target;
+
+ return sched->resched_target - sched->last_tick;
+ }
+
+no_tick:
+ sched->resched_target = U64_MAX;
+ return U64_MAX;
+}
+
+static void tick_work(struct work_struct *work)
+{
+ struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
+ tick_work.work);
+ struct panthor_device *ptdev = sched->ptdev;
+ struct panthor_sched_tick_ctx ctx;
+ u64 remaining_jiffies = 0, resched_delay;
+ u64 now = get_jiffies_64();
+ int prio, ret, cookie;
+
+ if (!drm_dev_enter(&ptdev->base, &cookie))
+ return;
+
+ ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ if (drm_WARN_ON(&ptdev->base, ret))
+ goto out_dev_exit;
+
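+ /* A non-zero remaining_jiffies means we fired before the
+ * scheduled target, i.e. a tick was forced in the middle of a
+ * tick period.
+ */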
+ if (time_before64(now, sched->resched_target))
+ remaining_jiffies = sched->resched_target - now;
+
+ mutex_lock(&sched->lock);
+ if (panthor_device_reset_is_pending(sched->ptdev))
+ goto out_unlock;
+
+ tick_ctx_init(sched, &ctx, remaining_jiffies != 0);
+ if (ctx.csg_upd_failed_mask)
+ goto out_cleanup_ctx;
+
+ if (remaining_jiffies) {
+ /* Scheduling forced in the middle of a tick. Only RT groups
+ * can preempt non-RT ones. Currently running RT groups can't be
+ * preempted.
+ */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
+ prio >= 0 && !tick_ctx_is_full(sched, &ctx);
+ prio--) {
+ tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
+ true, true);
+ if (prio == PANTHOR_CSG_PRIORITY_RT) {
+ tick_ctx_pick_groups_from_list(sched, &ctx,
+ &sched->groups.runnable[prio],
+ true, false);
+ }
+ }
+ }
+
+ /* First pick non-idle groups */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
+ prio >= 0 && !tick_ctx_is_full(sched, &ctx);
+ prio--) {
+ tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
+ true, false);
+ tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
+ }
+
+ /* If we have free CSG slots left, pick idle groups */
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
+ prio >= 0 && !tick_ctx_is_full(sched, &ctx);
+ prio--) {
+ /* Check the old_groups queue first to avoid reprogramming the slots */
+ tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
+ tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
+ false, false);
+ }
+
+ tick_ctx_apply(sched, &ctx);
+ if (ctx.csg_upd_failed_mask)
+ goto out_cleanup_ctx;
+
+ if (ctx.idle_group_count == ctx.group_count) {
+ panthor_devfreq_record_idle(sched->ptdev);
+ if (sched->pm.has_ref) {
+ pm_runtime_put_autosuspend(ptdev->base.dev);
+ sched->pm.has_ref = false;
+ }
+ } else {
+ panthor_devfreq_record_busy(sched->ptdev);
+ if (!sched->pm.has_ref) {
+ pm_runtime_get(ptdev->base.dev);
+ sched->pm.has_ref = true;
+ }
+ }
+
+ sched->last_tick = now;
+ resched_delay = tick_ctx_update_resched_target(sched, &ctx);
+ if (ctx.immediate_tick)
+ resched_delay = 0;
+
+ if (resched_delay != U64_MAX)
+ sched_queue_delayed_work(sched, tick, resched_delay);
+
+out_cleanup_ctx:
+ tick_ctx_cleanup(sched, &ctx);
+
+out_unlock:
+ mutex_unlock(&sched->lock);
+ pm_runtime_mark_last_busy(ptdev->base.dev);
+ pm_runtime_put_autosuspend(ptdev->base.dev);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+}
+
+static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
+{
+ struct panthor_queue *queue = group->queues[queue_idx];
+ union {
+ struct panthor_syncobj_64b sync64;
+ struct panthor_syncobj_32b sync32;
+ } *syncobj;
+ bool result;
+ u64 value;
+
+ syncobj = panthor_queue_get_syncwait_obj(group, queue);
+ if (!syncobj)
+ return -EINVAL;
+
+ value = queue->syncwait.sync64 ?
+ syncobj->sync64.seqno :
+ syncobj->sync32.seqno;
+
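+ /* Evaluate the wait condition: the queue either waits for the
+ * sync object to become strictly greater than, or less than or
+ * equal to, the reference value.
+ */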
+ if (queue->syncwait.gt)
+ result = value > queue->syncwait.ref;
+ else
+ result = value <= queue->syncwait.ref;
+
+ if (result)
+ panthor_queue_put_syncwait_obj(queue);
+
+ return result;
+}
+
+static void sync_upd_work(struct work_struct *work)
+{
+ struct panthor_scheduler *sched = container_of(work,
+ struct panthor_scheduler,
+ sync_upd_work);
+ struct panthor_group *group, *tmp;
+ bool immediate_tick = false;
+
+ mutex_lock(&sched->lock);
+ list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
+ u32 tested_queues = group->blocked_queues;
+ u32 unblocked_queues = 0;
+
+ while (tested_queues) {
+ u32 cs_id = ffs(tested_queues) - 1;
+ int ret;
+
+ ret = panthor_queue_eval_syncwait(group, cs_id);
+ drm_WARN_ON(&group->ptdev->base, ret < 0);
+ if (ret)
+ unblocked_queues |= BIT(cs_id);
+
+ tested_queues &= ~BIT(cs_id);
+ }
+
+ if (unblocked_queues) {
+ group->blocked_queues &= ~unblocked_queues;
+
+ if (group->csg_id < 0) {
+ list_move(&group->run_node,
+ &sched->groups.runnable[group->priority]);
+ if (group->priority == PANTHOR_CSG_PRIORITY_RT)
+ immediate_tick = true;
+ }
+ }
+
+ if (!group->blocked_queues)
+ list_del_init(&group->wait_node);
+ }
+ mutex_unlock(&sched->lock);
+
+ if (immediate_tick)
+ sched_queue_delayed_work(sched, tick, 0);
+}
+
+static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct list_head *queue = &sched->groups.runnable[group->priority];
+ u64 delay_jiffies = 0;
+ bool was_idle;
+ u64 now;
+
+ if (!group_can_run(group))
+ return;
+
+ /* All updated queues are blocked, no need to wake up the scheduler. */
+ if ((queue_mask & group->blocked_queues) == queue_mask)
+ return;
+
+ was_idle = group_is_idle(group);
+ group->idle_queues &= ~queue_mask;
+
+ /* Don't mess with the lists if we're in the middle of a reset. */
+ if (atomic_read(&sched->reset.in_progress))
+ return;
+
+ if (was_idle && !group_is_idle(group))
+ list_move_tail(&group->run_node, queue);
+
+ /* RT groups are preemptive. */
+ if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
+ sched_queue_delayed_work(sched, tick, 0);
+ return;
+ }
+
+ /* Some groups might be idle; force an immediate tick to
+ * re-evaluate.
+ */
+ if (sched->might_have_idle_groups) {
+ sched_queue_delayed_work(sched, tick, 0);
+ return;
+ }
+
+ /* The scheduler is already ticking; nothing to do. */
+ if (sched->resched_target != U64_MAX) {
+ /* If there are free slots, force an immediate tick. */
+ if (sched->used_csg_slot_count < sched->csg_slot_count)
+ sched_queue_delayed_work(sched, tick, 0);
+
+ return;
+ }
+
+ /* Scheduler tick was off, recalculate the resched_target based on the
+ * last tick event, and queue the scheduler work.
+ */
+ now = get_jiffies_64();
+ sched->resched_target = sched->last_tick + sched->tick_period;
+ if (sched->used_csg_slot_count == sched->csg_slot_count &&
+ time_before64(now, sched->resched_target))
+ delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
+
+ sched_queue_delayed_work(sched, tick, delay_jiffies);
+}
+
+static void queue_stop(struct panthor_queue *queue,
+ struct panthor_job *bad_job)
+{
+ drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
+}
+
+static void queue_start(struct panthor_queue *queue)
+{
+ struct panthor_job *job;
+
+ /* Re-assign the parent fences. */
+ list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
+ job->base.s_fence->parent = dma_fence_get(job->done_fence);
+
+ drm_sched_start(&queue->scheduler, true);
+}
+
+static void panthor_group_stop(struct panthor_group *group)
+{
+ struct panthor_scheduler *sched = group->ptdev->scheduler;
+
+ lockdep_assert_held(&sched->reset.lock);
+
+ for (u32 i = 0; i < group->queue_count; i++)
+ queue_stop(group->queues[i], NULL);
+
+ group_get(group);
+ list_move_tail(&group->run_node, &sched->reset.stopped_groups);
+}
+
+static void panthor_group_start(struct panthor_group *group)
+{
+ struct panthor_scheduler *sched = group->ptdev->scheduler;
+
+ lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
+
+ for (u32 i = 0; i < group->queue_count; i++)
+ queue_start(group->queues[i]);
+
+ if (group_can_run(group)) {
+ list_move_tail(&group->run_node,
+ group_is_idle(group) ?
+ &sched->groups.idle[group->priority] :
+ &sched->groups.runnable[group->priority]);
+ } else {
+ list_del_init(&group->run_node);
+ list_del_init(&group->wait_node);
+ group_queue_work(group, term);
+ }
+
+ group_put(group);
+}
+
+static void panthor_sched_immediate_tick(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+
+ sched_queue_delayed_work(sched, tick, 0);
+}
+
+/**
+ * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
+ */
+void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
+{
+ /* Force a tick to immediately kill faulty groups. */
+ if (ptdev->scheduler)
+ panthor_sched_immediate_tick(ptdev);
+}
+
+void panthor_sched_resume(struct panthor_device *ptdev)
+{
+ /* Force a tick to re-evaluate after a resume. */
+ panthor_sched_immediate_tick(ptdev);
+}
+
+void panthor_sched_suspend(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_csg_slots_upd_ctx upd_ctx;
+ u32 suspended_slots, faulty_slots;
+ struct panthor_group *group;
+ u32 i;
+
+ mutex_lock(&sched->lock);
+ csgs_upd_ctx_init(&upd_ctx);
+ for (i = 0; i < sched->csg_slot_count; i++) {
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
+
+ if (csg_slot->group) {
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
+ CSG_STATE_SUSPEND,
+ CSG_STATE_MASK);
+ }
+ }
+
+ suspended_slots = upd_ctx.update_mask;
+
+ csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
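+ /* Slots that timed out on the SUSPEND request are considered
+ * faulty; the remaining ones are now suspended.
+ */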
+ suspended_slots &= ~upd_ctx.timedout_mask;
+ faulty_slots = upd_ctx.timedout_mask;
+
+ if (faulty_slots) {
+ u32 slot_mask = faulty_slots;
+
+ drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
+ csgs_upd_ctx_init(&upd_ctx);
+ while (slot_mask) {
+ u32 csg_id = ffs(slot_mask) - 1;
+
+ csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
+ CSG_STATE_TERMINATE,
+ CSG_STATE_MASK);
+ slot_mask &= ~BIT(csg_id);
+ }
+
+ csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
+
+ slot_mask = upd_ctx.timedout_mask;
+ while (slot_mask) {
+ u32 csg_id = ffs(slot_mask) - 1;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+
+ /* The terminate command timed out, but the soft-reset will
+ * automatically terminate all active groups, so let's
+ * force the state to terminated here.
+ */
+ if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
+ csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
+ slot_mask &= ~BIT(csg_id);
+ }
+ }
+
+ /* Flush L2 and LSC caches to make sure suspend state is up-to-date.
+ * If the flush fails, flag all queues for termination.
+ */
+ if (suspended_slots) {
+ bool flush_caches_failed = false;
+ u32 slot_mask = suspended_slots;
+
+ if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
+ flush_caches_failed = true;
+
+ while (slot_mask) {
+ u32 csg_id = ffs(slot_mask) - 1;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+
+ if (flush_caches_failed)
+ csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
+ else
+ csg_slot_sync_update_locked(ptdev, csg_id);
+
+ slot_mask &= ~BIT(csg_id);
+ }
+
+ if (flush_caches_failed)
+ faulty_slots |= suspended_slots;
+ }
+
+ for (i = 0; i < sched->csg_slot_count; i++) {
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
+
+ group = csg_slot->group;
+ if (!group)
+ continue;
+
+ group_get(group);
+
+ if (group->csg_id >= 0)
+ sched_process_csg_irq_locked(ptdev, group->csg_id);
+
+ group_unbind_locked(group);
+
+ drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
+
+ if (group_can_run(group)) {
+ list_add(&group->run_node,
+ &sched->groups.idle[group->priority]);
+ } else {
+ /* We don't bother stopping the scheduler if the group is
+ * faulty; the group termination work will finish the job.
+ */
+ list_del_init(&group->wait_node);
+ group_queue_work(group, term);
+ }
+ group_put(group);
+ }
+ mutex_unlock(&sched->lock);
+}
+
+void panthor_sched_pre_reset(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_group *group, *group_tmp;
+ u32 i;
+
+ mutex_lock(&sched->reset.lock);
+ atomic_set(&sched->reset.in_progress, true);
+
+ /* Cancel all scheduler works. Once this is done, these works can't be
+ * scheduled again until the reset operation is complete.
+ */
+ cancel_work_sync(&sched->sync_upd_work);
+ cancel_delayed_work_sync(&sched->tick_work);
+
+ panthor_sched_suspend(ptdev);
+
+ /* Stop all groups that might still accept jobs, so we aren't
+ * handed new jobs while we're resetting.
+ */
+ for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
+ /* All groups should be in the idle lists. */
+ drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
+ list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
+ panthor_group_stop(group);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
+ list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
+ panthor_group_stop(group);
+ }
+
+ mutex_unlock(&sched->reset.lock);
+}
+
+void panthor_sched_post_reset(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_group *group, *group_tmp;
+
+ mutex_lock(&sched->reset.lock);
+
+ list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node)
+ panthor_group_start(group);
+
+ /* We're done resetting the GPU, clear the reset.in_progress bit so we can
+ * kick the scheduler.
+ */
+ atomic_set(&sched->reset.in_progress, false);
+ mutex_unlock(&sched->reset.lock);
+
+ sched_queue_delayed_work(sched, tick, 0);
+
+ sched_queue_work(sched, sync_upd);
+}
+
+static void group_sync_upd_work(struct work_struct *work)
+{
+ struct panthor_group *group =
+ container_of(work, struct panthor_group, sync_upd_work);
+ struct panthor_job *job, *job_tmp;
+ LIST_HEAD(done_jobs);
+ u32 queue_idx;
+ bool cookie;
+
+ cookie = dma_fence_begin_signalling();
+ for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
+ struct panthor_queue *queue = group->queues[queue_idx];
+ struct panthor_syncobj_64b *syncobj;
+
+ if (!queue)
+ continue;
+
+ syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
+
+ spin_lock(&queue->fence_ctx.lock);
+ list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
+ if (!job->call_info.size)
+ continue;
+
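+ /* Jobs on a queue complete in order: stop at the first fence
+ * whose seqno the sync object hasn't reached yet.
+ */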
+ if (syncobj->seqno < job->done_fence->seqno)
+ break;
+
+ list_move_tail(&job->node, &done_jobs);
+ dma_fence_signal_locked(job->done_fence);
+ }
+ spin_unlock(&queue->fence_ctx.lock);
+ }
+ dma_fence_end_signalling(cookie);
+
+ list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
+ list_del_init(&job->node);
+ panthor_job_put(&job->base);
+ }
+
+ group_put(group);
+}
+
+static struct dma_fence *
+queue_run_job(struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+ struct panthor_group *group = job->group;
+ struct panthor_queue *queue = group->queues[job->queue_idx];
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ u32 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
+ u32 ringbuf_insert = queue->iface.input->insert & (ringbuf_size - 1);
+ u64 addr_reg = ptdev->csif_info.cs_reg_count -
+ ptdev->csif_info.unpreserved_cs_reg_count;
+ u64 val_reg = addr_reg + 2;
+ u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
+ job->queue_idx * sizeof(struct panthor_syncobj_64b);
+ u32 waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
+ struct dma_fence *done_fence;
+ int ret;
+
+ u64 call_instrs[NUM_INSTRS_PER_SLOT] = {
+ /* MOV32 rX+2, cs.latest_flush */
+ (2ull << 56) | (val_reg << 48) | job->call_info.latest_flush,
+
+ /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
+ (36ull << 56) | (0ull << 48) | (val_reg << 40) | (0 << 16) | 0x233,
+
+ /* MOV48 rX:rX+1, cs.start */
+ (1ull << 56) | (addr_reg << 48) | job->call_info.start,
+
+ /* MOV32 rX+2, cs.size */
+ (2ull << 56) | (val_reg << 48) | job->call_info.size,
+
+ /* WAIT(0) => waits for FLUSH_CACHE2 instruction */
+ (3ull << 56) | (1 << 16),
+
+ /* CALL rX:rX+1, rX+2 */
+ (32ull << 56) | (addr_reg << 40) | (val_reg << 32),
+
+ /* MOV48 rX:rX+1, sync_addr */
+ (1ull << 56) | (addr_reg << 48) | sync_addr,
+
+ /* MOV48 rX+2, #1 */
+ (1ull << 56) | (val_reg << 48) | 1,
+
+ /* WAIT(all) */
+ (3ull << 56) | (waitall_mask << 16),
+
+ /* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
+ (51ull << 56) | (0ull << 48) | (addr_reg << 40) | (val_reg << 32) | (0 << 16) | 1,
+
+ /* ERROR_BARRIER, so we can recover from faults at job
+ * boundaries.
+ */
+ (47ull << 56),
+ };
+
+ /* Need to be cacheline aligned to please the prefetcher. */
+ static_assert(sizeof(call_instrs) % 64 == 0,
+ "call_instrs is not aligned on a cacheline");
+
+ /* Stream size is zero, nothing to do => return a NULL fence and let
+ * drm_sched signal the parent.
+ */
+ if (!job->call_info.size)
+ return NULL;
+
+ ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ if (drm_WARN_ON(&ptdev->base, ret))
+ return ERR_PTR(ret);
+
+ mutex_lock(&sched->lock);
+ if (!group_can_run(group)) {
+ done_fence = ERR_PTR(-ECANCELED);
+ goto out_unlock;
+ }
+
+ dma_fence_init(job->done_fence,
+ &panthor_queue_fence_ops,
+ &queue->fence_ctx.lock,
+ queue->fence_ctx.id,
+ atomic64_inc_return(&queue->fence_ctx.seqno));
+
+ memcpy(queue->ringbuf->kmap + ringbuf_insert,
+ call_instrs, sizeof(call_instrs));
+
+ panthor_job_get(&job->base);
+ spin_lock(&queue->fence_ctx.lock);
+ list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
+ spin_unlock(&queue->fence_ctx.lock);
+
+ job->ringbuf.start = queue->iface.input->insert;
+ job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs);
+
+ /* Make sure the ring buffer is updated before the INSERT
+ * register.
+ */
+ wmb();
+
+ queue->iface.input->extract = queue->iface.output->extract;
+ queue->iface.input->insert = job->ringbuf.end;
+
+ if (group->csg_id < 0) {
+ /* If the queue is blocked, we want to keep the timeout running, so we
+ * can detect unbounded waits and kill the group when that happens.
+ * Otherwise, we suspend the timeout so the time we spend waiting for
+ * a CSG slot is not counted.
+ */
+ if (!(group->blocked_queues & BIT(job->queue_idx)) &&
+ !queue->timeout_suspended) {
+ queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
+ queue->timeout_suspended = true;
+ }
+
+ group_schedule_locked(group, BIT(job->queue_idx));
+ } else {
+ gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
+ if (!sched->pm.has_ref &&
+ !(group->blocked_queues & BIT(job->queue_idx))) {
+ pm_runtime_get(ptdev->base.dev);
+ sched->pm.has_ref = true;
+ }
+ }
+
+ done_fence = dma_fence_get(job->done_fence);
+
+out_unlock:
+ mutex_unlock(&sched->lock);
+ pm_runtime_mark_last_busy(ptdev->base.dev);
+ pm_runtime_put_autosuspend(ptdev->base.dev);
+
+ return done_fence;
+}
+
+static enum drm_gpu_sched_stat
+queue_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+ struct panthor_group *group = job->group;
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_queue *queue = group->queues[job->queue_idx];
+
+ drm_warn(&ptdev->base, "job timeout\n");
+
+ drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
+
+ queue_stop(queue, job);
+
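+ /* Flag the group as timed out. If it still owns a CSG slot, let
+ * the tick logic evict and terminate it; otherwise terminate it
+ * directly.
+ */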
+ mutex_lock(&sched->lock);
+ group->timedout = true;
+ if (group->csg_id >= 0) {
+ sched_queue_delayed_work(ptdev->scheduler, tick, 0);
+ } else {
+ /* Remove from the run queues, so the scheduler can't
+ * pick the group on the next tick.
+ */
+ list_del_init(&group->run_node);
+ list_del_init(&group->wait_node);
+
+ group_queue_work(group, term);
+ }
+ mutex_unlock(&sched->lock);
+
+ queue_start(queue);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static void queue_free_job(struct drm_sched_job *sched_job)
+{
+ drm_sched_job_cleanup(sched_job);
+ panthor_job_put(sched_job);
+}
+
+static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
+ .run_job = queue_run_job,
+ .timedout_job = queue_timedout_job,
+ .free_job = queue_free_job,
+};
+
+static struct panthor_queue *
+group_create_queue(struct panthor_group *group,
+ const struct drm_panthor_queue_create *args)
+{
+ struct drm_gpu_scheduler *drm_sched;
+ struct panthor_queue *queue;
+ int ret;
+
+ if (args->pad[0] || args->pad[1] || args->pad[2])
+ return ERR_PTR(-EINVAL);
+
+ if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
+ !is_power_of_2(args->ringbuf_size))
+ return ERR_PTR(-EINVAL);
+
+ if (args->priority > CSF_MAX_QUEUE_PRIO)
+ return ERR_PTR(-EINVAL);
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return ERR_PTR(-ENOMEM);
+
+ queue->fence_ctx.id = dma_fence_context_alloc(1);
+ spin_lock_init(&queue->fence_ctx.lock);
+ INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
+
+ queue->priority = args->priority;
+
+ queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
+ args->ringbuf_size,
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(queue->ringbuf)) {
+ ret = PTR_ERR(queue->ringbuf);
+ goto err_free_queue;
+ }
+
+ ret = panthor_kernel_bo_vmap(queue->ringbuf);
+ if (ret)
+ goto err_free_queue;
+
+ queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
+ &queue->iface.input,
+ &queue->iface.output,
+ &queue->iface.input_fw_va,
+ &queue->iface.output_fw_va);
+ if (IS_ERR(queue->iface.mem)) {
+ ret = PTR_ERR(queue->iface.mem);
+ goto err_free_queue;
+ }
+
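+ /* The ring buffer is split into NUM_INSTRS_PER_SLOT-instruction
+ * job slots, so the slot count serves as the drm_sched submission
+ * limit.
+ */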
+ ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
+ group->ptdev->scheduler->wq, 1,
+ args->ringbuf_size / (NUM_INSTRS_PER_SLOT * sizeof(u64)),
+ 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
+ group->ptdev->reset.wq,
+ NULL, "panthor-queue", group->ptdev->base.dev);
+ if (ret)
+ goto err_free_queue;
+
+ drm_sched = &queue->scheduler;
+ ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
+ if (ret)
+ goto err_free_queue;
+
+ return queue;
+
+err_free_queue:
+ group_free_queue(group, queue);
+ return ERR_PTR(ret);
+}
+
+#define MAX_GROUPS_PER_POOL 128
+
+int panthor_group_create(struct panthor_file *pfile,
+ const struct drm_panthor_group_create *group_args,
+ const struct drm_panthor_queue_create *queue_args)
+{
+ struct panthor_device *ptdev = pfile->ptdev;
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
+ struct panthor_group *group = NULL;
+ u32 gid, i, suspend_size;
+ int ret;
+
+ if (group_args->pad)
+ return -EINVAL;
+
+ if (group_args->priority > PANTHOR_CSG_PRIORITY_HIGH)
+ return -EINVAL;
+
+ if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
+ (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
+ (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
+ return -EINVAL;
+
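+ /* The max core counts can't exceed the number of cores exposed by
+ * the supplied core masks.
+ */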
+ if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
+ hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
+ hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
+ return -EINVAL;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ spin_lock_init(&group->fatal_lock);
+ kref_init(&group->refcount);
+ group->state = PANTHOR_CS_GROUP_CREATED;
+ group->csg_id = -1;
+
+ group->ptdev = ptdev;
+ group->max_compute_cores = group_args->max_compute_cores;
+ group->compute_core_mask = group_args->compute_core_mask;
+ group->max_fragment_cores = group_args->max_fragment_cores;
+ group->fragment_core_mask = group_args->fragment_core_mask;
+ group->max_tiler_cores = group_args->max_tiler_cores;
+ group->tiler_core_mask = group_args->tiler_core_mask;
+ group->priority = group_args->priority;
+
+ INIT_LIST_HEAD(&group->wait_node);
+ INIT_LIST_HEAD(&group->run_node);
+ INIT_WORK(&group->term_work, group_term_work);
+ INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
+ INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
+ INIT_WORK(&group->release_work, group_release_work);
+
+ group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
+ if (!group->vm) {
+ ret = -EINVAL;
+ goto err_put_group;
+ }
+
+ suspend_size = csg_iface->control->suspend_size;
+ group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
+ if (IS_ERR(group->suspend_buf)) {
+ ret = PTR_ERR(group->suspend_buf);
+ group->suspend_buf = NULL;
+ goto err_put_group;
+ }
+
+ suspend_size = csg_iface->control->protm_suspend_size;
+ group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
+ if (IS_ERR(group->protm_suspend_buf)) {
+ ret = PTR_ERR(group->protm_suspend_buf);
+ group->protm_suspend_buf = NULL;
+ goto err_put_group;
+ }
+
+ group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
+ group_args->queues.count *
+ sizeof(struct panthor_syncobj_64b),
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+ if (IS_ERR(group->syncobjs)) {
+ ret = PTR_ERR(group->syncobjs);
+ goto err_put_group;
+ }
+
+ ret = panthor_kernel_bo_vmap(group->syncobjs);
+ if (ret)
+ goto err_put_group;
+
+ memset(group->syncobjs->kmap, 0,
+ group_args->queues.count * sizeof(struct panthor_syncobj_64b));
+
+ for (i = 0; i < group_args->queues.count; i++) {
+ group->queues[i] = group_create_queue(group, &queue_args[i]);
+ if (IS_ERR(group->queues[i])) {
+ ret = PTR_ERR(group->queues[i]);
+ group->queues[i] = NULL;
+ goto err_put_group;
+ }
+
+ group->queue_count++;
+ }
+
+ group->idle_queues = GENMASK(group->queue_count - 1, 0);
+
+ ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
+ if (ret)
+ goto err_put_group;
+
+ mutex_lock(&sched->reset.lock);
+ if (atomic_read(&sched->reset.in_progress)) {
+ panthor_group_stop(group);
+ } else {
+ mutex_lock(&sched->lock);
+ list_add_tail(&group->run_node,
+ &sched->groups.idle[group->priority]);
+ mutex_unlock(&sched->lock);
+ }
+ mutex_unlock(&sched->reset.lock);
+
+ return gid;
+
+err_put_group:
+ group_put(group);
+ return ret;
+}
+
+int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_device *ptdev = pfile->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_group *group;
+
+ group = xa_erase(&gpool->xa, group_handle);
+ if (!group)
+ return -EINVAL;
+
+ for (u32 i = 0; i < group->queue_count; i++) {
+ if (group->queues[i])
+ drm_sched_entity_destroy(&group->queues[i]->entity);
+ }
+
+ mutex_lock(&sched->reset.lock);
+ mutex_lock(&sched->lock);
+ group->destroyed = true;
+ if (group->csg_id >= 0) {
+ sched_queue_delayed_work(sched, tick, 0);
+ } else if (!atomic_read(&sched->reset.in_progress)) {
+ /* Remove from the run queues, so the scheduler can't
+ * pick the group on the next tick.
+ */
+ list_del_init(&group->run_node);
+ list_del_init(&group->wait_node);
+ group_queue_work(group, term);
+ }
+ mutex_unlock(&sched->lock);
+ mutex_unlock(&sched->reset.lock);
+
+ group_put(group);
+ return 0;
+}
+
+int panthor_group_get_state(struct panthor_file *pfile,
+ struct drm_panthor_group_get_state *get_state)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_device *ptdev = pfile->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_group *group;
+
+ if (get_state->pad)
+ return -EINVAL;
+
+ group = group_get(xa_load(&gpool->xa, get_state->group_handle));
+ if (!group)
+ return -EINVAL;
+
+ memset(get_state, 0, sizeof(*get_state));
+
+ mutex_lock(&sched->lock);
+ if (group->timedout)
+ get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
+ if (group->fatal_queues) {
+ get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
+ get_state->fatal_queues = group->fatal_queues;
+ }
+ mutex_unlock(&sched->lock);
+
+ group_put(group);
+ return 0;
+}
+
+int panthor_group_pool_create(struct panthor_file *pfile)
+{
+ struct panthor_group_pool *gpool;
+
+ gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
+ if (!gpool)
+ return -ENOMEM;
+
+ xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
+ pfile->groups = gpool;
+ return 0;
+}
+
+void panthor_group_pool_destroy(struct panthor_file *pfile)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_group *group;
+ unsigned long i;
+
+ if (IS_ERR_OR_NULL(gpool))
+ return;
+
+ xa_for_each(&gpool->xa, i, group)
+ panthor_group_destroy(pfile, i);
+
+ xa_destroy(&gpool->xa);
+ kfree(gpool);
+ pfile->groups = NULL;
+}
+
+static void job_release(struct kref *ref)
+{
+ struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
+
+ drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
+
+ if (job->base.s_fence)
+ drm_sched_job_cleanup(&job->base);
+
+ if (job->done_fence && job->done_fence->ops)
+ dma_fence_put(job->done_fence);
+ else
+ dma_fence_free(job->done_fence);
+
+ group_put(job->group);
+
+ kfree(job);
+}
+
+struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
+{
+ if (sched_job) {
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+
+ kref_get(&job->refcount);
+ }
+
+ return sched_job;
+}
+
+void panthor_job_put(struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+
+ if (sched_job)
+ kref_put(&job->refcount, job_release);
+}
+
+struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+
+ return job->group->vm;
+}
+
+struct drm_sched_job *
+panthor_job_create(struct panthor_file *pfile,
+ u16 group_handle,
+ const struct drm_panthor_queue_submit *qsubmit)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_job *job;
+ int ret;
+
+ if (qsubmit->pad)
+ return ERR_PTR(-EINVAL);
+
+ /* If stream_addr is zero, stream_size should be zero too. */
+ if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
+ return ERR_PTR(-EINVAL);
+
+ /* Make sure the address is aligned on 64-byte (cacheline) and the size is
+ * aligned on 8-byte (instruction size).
+ */
+ if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
+ return ERR_PTR(-EINVAL);
+
+ /* bits 24:30 must be zero. */
+ if (qsubmit->latest_flush & GENMASK(30, 24))
+ return ERR_PTR(-EINVAL);
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&job->refcount);
+ job->queue_idx = qsubmit->queue_index;
+ job->call_info.size = qsubmit->stream_size;
+ job->call_info.start = qsubmit->stream_addr;
+ job->call_info.latest_flush = qsubmit->latest_flush;
+ INIT_LIST_HEAD(&job->node);
+
+ job->group = group_get(xa_load(&gpool->xa, group_handle));
+ if (!job->group) {
+ ret = -EINVAL;
+ goto err_put_job;
+ }
+
+ if (job->queue_idx >= job->group->queue_count ||
+ !job->group->queues[job->queue_idx]) {
+ ret = -EINVAL;
+ goto err_put_job;
+ }
+
+ job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
+ if (!job->done_fence) {
+ ret = -ENOMEM;
+ goto err_put_job;
+ }
+
+ ret = drm_sched_job_init(&job->base,
+ &job->group->queues[job->queue_idx]->entity,
+ 1, job->group);
+ if (ret)
+ goto err_put_job;
+
+ return &job->base;
+
+err_put_job:
+ panthor_job_put(&job->base);
+ return ERR_PTR(ret);
+}
+
+void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+
+ /* Still not sure why we want USAGE_WRITE for external objects, since I
+ * was assuming this would be handled through explicit syncs being imported
+ * to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
+ * seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
+ */
+ panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
+ DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+}
+
+void panthor_sched_unplug(struct panthor_device *ptdev)
+{
+ struct panthor_scheduler *sched = ptdev->scheduler;
+
+ cancel_delayed_work_sync(&sched->tick_work);
+
+ mutex_lock(&sched->lock);
+ if (sched->pm.has_ref) {
+ pm_runtime_put(ptdev->base.dev);
+ sched->pm.has_ref = false;
+ }
+ mutex_unlock(&sched->lock);
+}
+
+static void panthor_sched_fini(struct drm_device *ddev, void *res)
+{
+ struct panthor_scheduler *sched = res;
+ int prio;
+
+ if (!sched || !sched->csg_slot_count)
+ return;
+
+ cancel_delayed_work_sync(&sched->tick_work);
+
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
+
+ if (sched->heap_alloc_wq)
+ destroy_workqueue(sched->heap_alloc_wq);
+
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
+ drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
+ }
+
+ drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
+}
+
+int panthor_sched_init(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
+ struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
+ struct panthor_scheduler *sched;
+ u32 gpu_as_count, num_groups;
+ int prio, ret;
+
+ sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
+ if (!sched)
+ return -ENOMEM;
+
+ /* The highest bit in JOB_INT_* is reserved for global IRQs. That
+ * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
+ */
+ num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);
+
+ /* The FW-side scheduler might deadlock if two groups with the same
+ * priority try to access a set of resources that overlaps, with part
+ * of the resources being allocated to one group and the other part to
+ * the other group, both groups waiting for the remaining resources to
+ * be allocated. To avoid that, it is recommended to assign each CSG a
+ * different priority. In theory we could allow several groups to have
+ * the same CSG priority if they don't request the same resources, but
+ * that makes the scheduling logic more complicated, so let's clamp
+ * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
+ */
+ num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);
+
+ /* We need at least one AS for the MCU and one for the GPU contexts. */
+ gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
+ if (!gpu_as_count) {
+ drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
+ gpu_as_count + 1);
+ return -EINVAL;
+ }
+
+ sched->ptdev = ptdev;
+ sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
+ sched->csg_slot_count = num_groups;
+ sched->cs_slot_count = csg_iface->control->stream_num;
+ sched->as_slot_count = gpu_as_count;
+ ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
+ ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
+ ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
+
+ sched->last_tick = 0;
+ sched->resched_target = U64_MAX;
+ sched->tick_period = msecs_to_jiffies(10);
+ INIT_DELAYED_WORK(&sched->tick_work, tick_work);
+ INIT_WORK(&sched->sync_upd_work, sync_upd_work);
+ INIT_WORK(&sched->fw_events_work, process_fw_events_work);
+
+ ret = drmm_mutex_init(&ptdev->base, &sched->lock);
+ if (ret)
+ return ret;
+
+ for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
+ INIT_LIST_HEAD(&sched->groups.runnable[prio]);
+ INIT_LIST_HEAD(&sched->groups.idle[prio]);
+ }
+ INIT_LIST_HEAD(&sched->groups.waiting);
+
+ ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&sched->reset.stopped_groups);
+
+ /* sched->heap_alloc_wq will be used for heap chunk allocation on
+ * tiler OOM events, which means we can't use the same workqueue for
+ * the scheduler because work items queued by the scheduler are in
+ * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
+ * work around this limitation.
+ *
+ * FIXME: Ultimately, what we need is a failable/non-blocking GEM
+ * allocation path that we can call when a heap OOM is reported. The
+ * FW is smart enough to fall back on other methods if the kernel can't
+ * allocate memory, and fail the tiling job if none of these
+ * countermeasures worked.
+ *
+ * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
+ * system is running out of memory.
+ */
+ sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
+ sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!sched->wq || !sched->heap_alloc_wq) {
+ panthor_sched_fini(&ptdev->base, sched);
+ drm_err(&ptdev->base, "Failed to allocate the workqueues");
+ return -ENOMEM;
+ }
+
+ ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
+ if (ret)
+ return ret;
+
+ ptdev->scheduler = sched;
+ return 0;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
new file mode 100644
index 000000000000..66438b1f331f
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2023 Collabora ltd. */
+
+#ifndef __PANTHOR_SCHED_H__
+#define __PANTHOR_SCHED_H__
+
+struct drm_exec;
+struct dma_fence;
+struct drm_file;
+struct drm_gem_object;
+struct drm_sched_job;
+struct drm_panthor_group_create;
+struct drm_panthor_queue_create;
+struct drm_panthor_group_get_state;
+struct drm_panthor_queue_submit;
+struct panthor_device;
+struct panthor_file;
+struct panthor_group_pool;
+struct panthor_job;
+
+int panthor_group_create(struct panthor_file *pfile,
+ const struct drm_panthor_group_create *group_args,
+ const struct drm_panthor_queue_create *queue_args);
+int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle);
+int panthor_group_get_state(struct panthor_file *pfile,
+ struct drm_panthor_group_get_state *get_state);
+
+struct drm_sched_job *
+panthor_job_create(struct panthor_file *pfile,
+ u16 group_handle,
+ const struct drm_panthor_queue_submit *qsubmit);
+struct drm_sched_job *panthor_job_get(struct drm_sched_job *job);
+struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job);
+void panthor_job_put(struct drm_sched_job *job);
+void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
+
+int panthor_group_pool_create(struct panthor_file *pfile);
+void panthor_group_pool_destroy(struct panthor_file *pfile);
+
+int panthor_sched_init(struct panthor_device *ptdev);
+void panthor_sched_unplug(struct panthor_device *ptdev);
+void panthor_sched_pre_reset(struct panthor_device *ptdev);
+void panthor_sched_post_reset(struct panthor_device *ptdev);
+void panthor_sched_suspend(struct panthor_device *ptdev);
+void panthor_sched_resume(struct panthor_device *ptdev);
+
+void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
+void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
+
+#endif
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 281edab518cd..d6ea01f3797b 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -421,7 +421,6 @@ int qxl_surface_id_alloc(struct qxl_device *qdev,
{
uint32_t handle;
int idr_ret;
- int count = 0;
again:
idr_preload(GFP_ATOMIC);
spin_lock(&qdev->surf_id_idr_lock);
@@ -433,7 +432,6 @@ again:
handle = idr_ret;
if (handle >= qdev->rom->n_surfaces) {
- count++;
spin_lock(&qdev->surf_id_idr_lock);
idr_remove(&qdev->surf_id_idr, handle);
spin_unlock(&qdev->surf_id_idr_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index dd0f834d881c..506ae1f5e099 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -145,7 +145,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
struct qxl_release *release;
struct qxl_bo *cmd_bo;
void *fb_cmd;
- int i, ret, num_relocs;
+ int i, ret;
int unwritten;
switch (cmd->type) {
@@ -200,7 +200,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
}
/* fill out reloc info structs */
- num_relocs = 0;
for (i = 0; i < cmd->relocs_num; ++i) {
struct drm_qxl_reloc reloc;
struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
@@ -230,7 +229,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
reloc_info[i].dst_bo = cmd_bo;
reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
}
- num_relocs++;
/* reserve and validate the reloc dst bo */
if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1e46b0a6e478..5893e27a7ae5 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -29,9 +29,6 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-static int __qxl_bo_pin(struct qxl_bo *bo);
-static void __qxl_bo_unpin(struct qxl_bo *bo);
-
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct qxl_bo *bo;
@@ -167,13 +164,9 @@ int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
goto out;
}
- r = __qxl_bo_pin(bo);
- if (r)
- return r;
-
r = ttm_bo_vmap(&bo->tbo, &bo->map);
if (r) {
- __qxl_bo_unpin(bo);
+ qxl_bo_unpin_locked(bo);
return r;
}
bo->map_count = 1;
@@ -246,7 +239,6 @@ void qxl_bo_vunmap_locked(struct qxl_bo *bo)
return;
bo->kptr = NULL;
ttm_bo_vunmap(&bo->tbo, &bo->map);
- __qxl_bo_unpin(bo);
}
int qxl_bo_vunmap(struct qxl_bo *bo)
@@ -290,12 +282,14 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
return bo;
}
-static int __qxl_bo_pin(struct qxl_bo *bo)
+int qxl_bo_pin_locked(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->tbo.base.dev;
int r;
+ dma_resv_assert_held(bo->tbo.base.resv);
+
if (bo->tbo.pin_count) {
ttm_bo_pin(&bo->tbo);
return 0;
@@ -309,14 +303,16 @@ static int __qxl_bo_pin(struct qxl_bo *bo)
return r;
}
-static void __qxl_bo_unpin(struct qxl_bo *bo)
+void qxl_bo_unpin_locked(struct qxl_bo *bo)
{
+ dma_resv_assert_held(bo->tbo.base.resv);
+
ttm_bo_unpin(&bo->tbo);
}
/*
* Reserve the BO before pinning the object. If the BO was reserved
- * beforehand, use the internal version directly __qxl_bo_pin.
+ * beforehand, use the internal version directly qxl_bo_pin_locked.
*
*/
int qxl_bo_pin(struct qxl_bo *bo)
@@ -327,14 +323,14 @@ int qxl_bo_pin(struct qxl_bo *bo)
if (r)
return r;
- r = __qxl_bo_pin(bo);
+ r = qxl_bo_pin_locked(bo);
qxl_bo_unreserve(bo);
return r;
}
/*
* Reserve the BO before pinning the object. If the BO was reserved
- * beforehand, use the internal version directly __qxl_bo_unpin.
+ * beforehand, use the internal version directly qxl_bo_unpin_locked.
*
*/
int qxl_bo_unpin(struct qxl_bo *bo)
@@ -345,7 +341,7 @@ int qxl_bo_unpin(struct qxl_bo *bo)
if (r)
return r;
- __qxl_bo_unpin(bo);
+ qxl_bo_unpin_locked(bo);
qxl_bo_unreserve(bo);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 53392cb90eec..1cf5bc759101 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -67,6 +67,8 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int pa
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
+extern int qxl_bo_pin_locked(struct qxl_bo *bo);
+extern void qxl_bo_unpin_locked(struct qxl_bo *bo);
extern int qxl_bo_pin(struct qxl_bo *bo);
extern int qxl_bo_unpin(struct qxl_bo *bo);
extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 9169c26357d3..19bf551a7b31 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -32,14 +32,14 @@ int qxl_gem_prime_pin(struct drm_gem_object *obj)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
- return qxl_bo_pin(bo);
+ return qxl_bo_pin_locked(bo);
}
void qxl_gem_prime_unpin(struct drm_gem_object *obj)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
- qxl_bo_unpin(bo);
+ qxl_bo_unpin_locked(bo);
}
struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 368d26da0d6a..9febc8b73f09 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
+ struct qxl_release *release;
+ int count = 0, sc = 0;
+ bool have_drawable_releases;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
+ release = container_of(fence, struct qxl_release, base);
+ have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
- if (!wait_event_timeout(qdev->release_event,
- (dma_fence_is_signaled(fence) ||
- (qxl_io_notify_oom(qdev), 0)),
- timeout))
- return 0;
+retry:
+ sc++;
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+
+ qxl_io_notify_oom(qdev);
+
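+ /* Run garbage collection a bounded number of times, re-checking
+ * the fence after each pass.
+ */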
+ for (count = 0; count < 11; count++) {
+ if (!qxl_queue_garbage_collect(qdev, true))
+ break;
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+ }
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+
+ if (have_drawable_releases || sc < 4) {
+ if (sc > 2)
+ /* back off */
+ usleep_range(500, 1000);
+
+ if (time_after(jiffies, end))
+ return 0;
+
+ if (have_drawable_releases && sc > 300) {
+ DMA_FENCE_WARN(fence,
+ "failed to wait on release %llu after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
+ goto signaled;
+ }
+ goto retry;
+ }
+ /*
+ * Note: the original sync_obj_wait gave up after 3 spins when
+ * have_drawable_releases was not set.
+ */
+signaled:
cur = jiffies;
if (time_after(cur, end))
return 0;
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index f98356be0af2..18c867219a70 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -2,11 +2,13 @@
config DRM_RADEON
tristate "ATI Radeon"
- depends on DRM && PCI && MMU
depends on AGP || !AGP
+ depends on DRM
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on PCI
+ depends on MMU
select FW_LOADER
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SUBALLOC_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 94947229888b..b7f22597ee95 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
- //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
UCHAR ucNumDPMLevels;
//a index to the array of nonClockInfos
@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
/**
* Driver will read the first ucNumDPMLevels in this array
*/
- UCHAR clockInfoIndex[1];
+ UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
- ATOM_PPLIB_STATE_V2 states[1];
+ ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
}StateArray;
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[1];
+ UCHAR clockInfo[] __counted_by(ucNumEntries);
}ClockInfoArray;
typedef struct _NonClockInfoArray{
@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 86b8b770af19..0b1e19345f43 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 25201b9a5aae..1620f534f55f 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index eae8a6389f5e..a979662eaa73 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index b5e97d95a19f..087d41e370fd 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -26,11 +26,12 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <drm/drm_device.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3e5ff17e3caf..0999c8eaae94 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -132,7 +132,6 @@ extern int radeon_cik_support;
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_COMPONENTS 32
-#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
/* internal ring indices */
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bb1f0a3371ab..10793a433bf5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
- ATOM_CONNECTOR_INFO_I2C ci =
- supported_devices->info.asConnInfo[i];
+ ATOM_CONNECTOR_INFO_I2C ci;
+
+ if (frev > 1)
+ ci = supported_devices->info_2d1.asConnInfo[i];
+ else
+ ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index efd18c8d84c8..5f1d24d3120c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -683,7 +683,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc;
- radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ radeon_crtc = kzalloc(sizeof(*radeon_crtc), GFP_KERNEL);
if (radeon_crtc == NULL)
return;
@@ -709,12 +709,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
-#if 0
- radeon_crtc->mode_set.crtc = &radeon_crtc->base;
- radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
- radeon_crtc->mode_set.num_connectors = 0;
-#endif
-
if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
radeon_atombios_init_crtc(dev, radeon_crtc);
else
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 9ebe4a0b9a6c..4fb780d96f32 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -30,6 +30,7 @@
*/
#include <linux/atomic.h>
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/sched/signal.h>
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3fec3acdaf28..2ef201a072f1 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/iosys-map.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index fb9ecf5dbe2b..63d914f3414d 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -27,6 +27,8 @@
* Christian König
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_file.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4482c8c5f5ce..2d9d9f46f243 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -21,6 +21,7 @@
* Alex Deucher <alexdeucher@gmail.com>
*/
+#include <linux/debugfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b3cfc99f4d7e..a77881f035e7 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -73,32 +73,21 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
- ret = radeon_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return ret;
-
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0))
bo->prime_shared_count++;
- radeon_bo_unreserve(bo);
return ret;
}
void radeon_gem_prime_unpin(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
- int ret = 0;
-
- ret = radeon_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return;
radeon_bo_unpin(bo);
if (bo->prime_shared_count)
bo->prime_shared_count--;
- radeon_bo_unreserve(bo);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 38048593bb4a..8d1d458286a8 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -27,6 +27,8 @@
* Christian König
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_device.h>
#include <drm/drm_file.h>
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 2078b0000e22..5c65b6dfb99a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -30,6 +30,7 @@
* Dave Airlie
*/
+#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index d7f552d441ab..d4d1501e6576 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 79709d26d983..bbc6ccabf788 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/renesas/rcar-du/Kconfig b/drivers/gpu/drm/renesas/rcar-du/Kconfig
index 53c356aed5d5..2dc739db2ba3 100644
--- a/drivers/gpu/drm/renesas/rcar-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rcar-du/Kconfig
@@ -25,8 +25,8 @@ config DRM_RCAR_CMM
config DRM_RCAR_DW_HDMI
tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support"
depends on DRM && OF
+ depends on DRM_DW_HDMI
depends on DRM_RCAR_DU || COMPILE_TEST
- select DRM_DW_HDMI
help
Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder.
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
index 0ae6331d6430..8643ff2eec46 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
@@ -66,9 +66,6 @@ void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc)
void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc)
{
struct vsp1_du_atomic_pipe_config cfg = { { 0, } };
- struct rzg2l_du_crtc_state *state;
-
- state = to_rzg2l_crtc_state(crtc->crtc.state);
vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 1bf3e2829cd0..4c7072e6e34e 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -7,7 +7,6 @@ config DRM_ROCKCHIP
select DRM_PANEL
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
- select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI
@@ -36,9 +35,9 @@ config ROCKCHIP_VOP2
config ROCKCHIP_ANALOGIX_DP
bool "Rockchip specific extensions for Analogix DP driver"
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER=y || (DRM_DISPLAY_HELPER=m && DRM_ROCKCHIP=m)
depends on ROCKCHIP_VOP
- select DRM_DISPLAY_HELPER
- select DRM_DISPLAY_DP_HELPER
help
This selects support for Rockchip SoC specific extensions
for the Analogix Core DP driver. If you want to enable DP
@@ -46,9 +45,9 @@ config ROCKCHIP_ANALOGIX_DP
config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER=y || (DRM_DISPLAY_HELPER=m && DRM_ROCKCHIP=m)
depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
- select DRM_DISPLAY_HELPER
- select DRM_DISPLAY_DP_HELPER
help
This selects support for Rockchip SoC specific extensions
for the cdn DP driver. If you want to enable DP on
@@ -57,6 +56,7 @@ config ROCKCHIP_CDN_DP
config ROCKCHIP_DW_HDMI
bool "Rockchip specific extensions for Synopsys DW HDMI"
+ depends on DRM_DW_HDMI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
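Illustrative reading of the new dependency expressions above (not part of the patch): ROCKCHIP_ANALOGIX_DP and ROCKCHIP_CDN_DP are bool symbols linked into DRM_ROCKCHIP, so `DRM_DISPLAY_HELPER=y || (DRM_DISPLAY_HELPER=m && DRM_ROCKCHIP=m)` only admits configurations where built-in code never calls into a module: either the helper is built-in, or both the helper and the Rockchip driver are modules. A config with DRM_ROCKCHIP=y and DRM_DISPLAY_HELPER=m would be rejected.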
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a855c45ae7f3..bd7aa891b839 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -262,20 +262,21 @@ static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
- struct edid *edid;
int ret = 0;
mutex_lock(&dp->lock);
- edid = dp->edid;
- if (edid) {
+
+ if (dp->drm_edid) {
+ /* FIXME: get rid of drm_edid_raw() */
+ const struct edid *edid = drm_edid_raw(dp->drm_edid);
+
DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
edid->width_cm, edid->height_cm);
- dp->sink_has_audio = drm_detect_monitor_audio(edid);
-
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
}
+
+ ret = drm_edid_connector_add_modes(connector);
+
mutex_unlock(&dp->lock);
return ret;
@@ -380,9 +381,13 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
return ret;
}
- kfree(dp->edid);
- dp->edid = drm_do_get_edid(&dp->connector,
- cdn_dp_get_edid_block, dp);
+ drm_edid_free(dp->drm_edid);
+ dp->drm_edid = drm_edid_read_custom(&dp->connector,
+ cdn_dp_get_edid_block, dp);
+ drm_edid_connector_update(&dp->connector, dp->drm_edid);
+
+ dp->sink_has_audio = dp->connector.display_info.has_audio;
+
return 0;
}
@@ -488,8 +493,8 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
dp->max_lanes = 0;
dp->max_rate = 0;
if (!dp->connected) {
- kfree(dp->edid);
- dp->edid = NULL;
+ drm_edid_free(dp->drm_edid);
+ dp->drm_edid = NULL;
}
return 0;
@@ -1131,8 +1136,8 @@ static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
pm_runtime_disable(dev);
if (dp->fw_loaded)
release_firmware(dp->fw);
- kfree(dp->edid);
- dp->edid = NULL;
+ drm_edid_free(dp->drm_edid);
+ dp->drm_edid = NULL;
}
static const struct component_ops cdn_dp_component_ops = {
@@ -1259,7 +1264,6 @@ struct platform_driver cdn_dp_driver = {
.shutdown = cdn_dp_shutdown,
.driver = {
.name = "cdn-dp",
- .owner = THIS_MODULE,
.of_match_table = cdn_dp_dt_ids,
.pm = &cdn_dp_pm_ops,
},
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 5b2fed1f5f55..8e6e95d269da 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -70,7 +70,7 @@ struct cdn_dp_device {
struct drm_display_mode mode;
struct platform_device *audio_pdev;
struct work_struct event_work;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
struct mutex lock;
bool connected;
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 1d2261643743..3df2cfcf9998 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -606,18 +606,16 @@ inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret = 0;
if (!hdmi->ddc)
return 0;
- edid = drm_get_edid(connector, hdmi->ddc);
- if (edid) {
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
+ drm_edid_connector_update(connector, drm_edid);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
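For reference, a minimal sketch (not taken from the patch, and assuming a driver-private `ddc` adapter is passed in) of the struct drm_edid flow that the inno_hdmi, rk3066 and sun4i conversions all follow:

	#include <drm/drm_edid.h>

	static int example_get_modes(struct drm_connector *connector,
				     struct i2c_adapter *ddc)
	{
		const struct drm_edid *drm_edid;
		int ret;

		/* Read into the opaque, length-validated drm_edid container. */
		drm_edid = drm_edid_read_ddc(connector, ddc);
		/* A NULL EDID clears the property; no modes will be added. */
		drm_edid_connector_update(connector, drm_edid);
		ret = drm_edid_connector_add_modes(connector);
		drm_edid_free(drm_edid);

		return ret;
	}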
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 95cd1b49eda8..784de990da1b 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -466,18 +466,16 @@ rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret = 0;
if (!hdmi->ddc)
return 0;
- edid = drm_get_edid(connector, hdmi->ddc);
- if (edid) {
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
+ drm_edid_connector_update(connector, drm_edid);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index fdd768bbd487..62ebbdb16253 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -706,6 +706,8 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
const struct drm_format_info *info;
u16 hor_scl_mode, ver_scl_mode;
u16 hscl_filter_mode, vscl_filter_mode;
+ uint16_t cbcr_src_w = src_w;
+ uint16_t cbcr_src_h = src_h;
u8 gt2 = 0;
u8 gt4 = 0;
u32 val;
@@ -763,27 +765,27 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
vop2_win_write(win, VOP2_WIN_YRGB_VSCL_FILTER_MODE, vscl_filter_mode);
if (info->is_yuv) {
- src_w /= info->hsub;
- src_h /= info->vsub;
+ cbcr_src_w /= info->hsub;
+ cbcr_src_h /= info->vsub;
gt4 = 0;
gt2 = 0;
- if (src_h >= (4 * dst_h)) {
+ if (cbcr_src_h >= (4 * dst_h)) {
gt4 = 1;
- src_h >>= 2;
- } else if (src_h >= (2 * dst_h)) {
+ cbcr_src_h >>= 2;
+ } else if (cbcr_src_h >= (2 * dst_h)) {
gt2 = 1;
- src_h >>= 1;
+ cbcr_src_h >>= 1;
}
- hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
- ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
+ hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
+ ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
- val = vop2_scale_factor(src_w, dst_w);
+ val = vop2_scale_factor(cbcr_src_w, dst_w);
vop2_win_write(win, VOP2_WIN_SCALE_CBCR_X, val);
- val = vop2_scale_factor(src_h, dst_h);
+ val = vop2_scale_factor(cbcr_src_h, dst_h);
vop2_win_write(win, VOP2_WIN_SCALE_CBCR_Y, val);
vop2_win_write(win, VOP2_WIN_VSD_CBCR_GT4, gt4);
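Illustrative numbers for the fix above: for a 4:2:0 format such as NV12 (hsub = vsub = 2), a 1920x1080 source has a 960x540 chroma plane. cbcr_src_w/cbcr_src_h now carry 960/540 into the VOP2_WIN_SCALE_CBCR_X/Y writes, while src_w/src_h keep 1920/1080 instead of being halved in place.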
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 77b76cff1adb..9a01aa450741 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -17,7 +17,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 48170694ac6b..18efb3fe1c00 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -17,9 +17,7 @@
static const uint32_t formats_cluster[] = {
DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3c4f5a392b06..58c8161289fe 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -71,13 +71,19 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
+ /*
+ * It's perfectly valid to initialize an entity without having a valid
+ * scheduler attached. It's just not valid to use the scheduler before it
+ * is initialized itself.
+ */
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
RCU_INIT_POINTER(entity->last_scheduled, NULL);
RB_CLEAR_NODE(&entity->rb_tree_node);
- if (!sched_list[0]->sched_rq) {
- /* Warn drivers not to do this and to fix their DRM
- * calling order.
+ if (num_sched_list && !sched_list[0]->sched_rq) {
+ /* Every entry covered by num_sched_list
+ * should be non-NULL, so warn drivers
+ * not to do this and to fix their DRM calling order.
*/
pr_warn("%s: called with uninitialized scheduler\n", __func__);
} else if (num_sched_list) {
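A hypothetical entity set-up this relaxed check permits (sketch, not from the patch): the entity may be created before any scheduler exists, as long as no jobs are pushed until one is attached.

	struct drm_sched_entity entity;
	int ret;

	/* No scheduler yet: sched_list == NULL, num_sched_list == 0. */
	ret = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
				    NULL, 0, NULL);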
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 4bab93c4fefd..1799c12babf5 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -5,6 +5,7 @@
*/
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 4741d9f6544c..5b19c7cb7b7e 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -57,8 +57,8 @@ config DRM_SUN6I_DSI
config DRM_SUN8I_DW_HDMI
tristate "Support for Allwinner version of DesignWare HDMI"
depends on DRM_SUN4I
+ depends on DRM_DW_HDMI
default DRM_SUN4I
- select DRM_DW_HDMI
help
Choose this option if you have an Allwinner SoC with the
DesignWare HDMI controller. SoCs that support HDMI and
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 2d1880c61b50..245b34adca5a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -214,20 +214,24 @@ sun4i_hdmi_connector_mode_valid(struct drm_connector *connector,
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
- edid = drm_get_edid(connector, hdmi->ddc_i2c ?: hdmi->i2c);
- if (!edid)
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc_i2c ?: hdmi->i2c);
+
+ drm_edid_connector_update(connector, drm_edid);
+ cec_s_phys_addr(hdmi->cec_adap,
+ connector->display_info.source_physical_address, false);
+
+ if (!drm_edid)
return 0;
DRM_DEBUG_DRIVER("Monitor is %s monitor\n",
connector->display_info.is_hdmi ? "an HDMI" : "a DVI");
- drm_connector_update_edid_property(connector, edid);
- cec_s_phys_addr_from_edid(hdmi->cec_adap, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 84e7e6bc3a0c..6974caa99ece 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,11 +4,11 @@ config DRM_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
depends on COMMON_CLK
depends on DRM
+ depends on DRM_DISPLAY_DP_AUX_BUS
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HDMI_HELPER
+ depends on DRM_DISPLAY_HELPER
depends on OF
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index e48863a44556..e3b50e240d36 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -103,7 +103,7 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, bias_size, bias_size);
- drm_buddy_free_list(&mm, &tmp);
+ drm_buddy_free_list(&mm, &tmp, 0);
/* single page with internal round_up */
KUNIT_ASSERT_FALSE_MSG(test,
@@ -113,7 +113,7 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
DRM_BUDDY_RANGE_ALLOCATION),
"buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
bias_start, bias_end, ps, bias_size);
- drm_buddy_free_list(&mm, &tmp);
+ drm_buddy_free_list(&mm, &tmp, 0);
/* random size within */
size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
@@ -153,14 +153,14 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
* unallocated, and ideally not always on the bias
* boundaries.
*/
- drm_buddy_free_list(&mm, &tmp);
+ drm_buddy_free_list(&mm, &tmp, 0);
} else {
list_splice_tail(&tmp, &allocated);
}
}
kfree(order);
- drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
/*
@@ -220,7 +220,149 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
"buddy_alloc passed with bias(%x-%x), size=%u\n",
bias_start, bias_end, ps);
- drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_free_list(&mm, &allocated, 0);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_clear(struct kunit *test)
+{
+ unsigned long n_pages, total, i = 0;
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned long ps = SZ_4K;
+ struct drm_buddy_block *block;
+ const int max_order = 12;
+ LIST_HEAD(allocated);
+ struct drm_buddy mm;
+ unsigned int order;
+ u32 mm_size, size;
+ LIST_HEAD(dirty);
+ LIST_HEAD(clean);
+
+ mm_size = SZ_4K << max_order;
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ /*
+ * The idea is to allocate and free some random portion of the address space,
+ * returning those pages as non-dirty, then randomly alternate between
+ * requesting dirty and non-dirty pages (not going over the limit
+ * we freed as non-dirty), putting that into two separate lists.
+ * Loop over both lists at the end checking that the dirty list
+ * is indeed all dirty pages and vice versa. Free it all again,
+ * keeping the dirty/clear status.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 5 * ps, ps, &allocated,
+ DRM_BUDDY_TOPDOWN_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 5 * ps);
+ drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+
+ n_pages = 10;
+ do {
+ unsigned long flags;
+ struct list_head *list;
+ int slot = i % 2;
+
+ if (slot == 0) {
+ list = &dirty;
+ flags = 0;
+ } else {
+ list = &clean;
+ flags = DRM_BUDDY_CLEAR_ALLOCATION;
+ }
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ ps, ps, list,
+ flags),
+ "buddy_alloc hit an error size=%lu\n", ps);
+ } while (++i < n_pages);
+
+ list_for_each_entry(block, &clean, link)
+ KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);
+
+ list_for_each_entry(block, &dirty, link)
+ KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+
+ drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+
+ /*
+ * Try to go over the clear limit with one allocation.
+ * The allocation should never fail with a reasonable page size.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 10 * ps, ps, &clean,
+ DRM_BUDDY_CLEAR_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 10 * ps);
+
+ drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+ drm_buddy_free_list(&mm, &dirty, 0);
+ drm_buddy_fini(&mm);
+
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+ /*
+ * Create a new mm. Intentionally fragment the address space by creating
+ * two alternating lists. Free both lists, one as dirty, the other as clean.
+ * Try to allocate double the previous size with matching min_page_size. The
+ * allocation should never fail as it falls back to force_merge. Also check
+ * that the page is always dirty after force_merge. Free the page as dirty,
+ * then repeat the whole thing, incrementing the order until we hit max_order.
+ */
+
+ i = 0;
+ n_pages = mm_size / ps;
+ do {
+ struct list_head *list;
+ int slot = i % 2;
+
+ if (slot == 0)
+ list = &dirty;
+ else
+ list = &clean;
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ ps, ps, list, 0),
+ "buddy_alloc hit an error size=%lu\n", ps);
+ } while (++i < n_pages);
+
+ drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+ drm_buddy_free_list(&mm, &dirty, 0);
+
+ order = 1;
+ do {
+ size = SZ_4K << order;
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ size, size, &allocated,
+ DRM_BUDDY_CLEAR_ALLOCATION),
+ "buddy_alloc hit an error size=%u\n", size);
+ total = 0;
+ list_for_each_entry(block, &allocated, link) {
+ if (size != mm_size)
+ KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+ total += drm_buddy_block_size(&mm, block);
+ }
+ KUNIT_EXPECT_EQ(test, total, size);
+
+ drm_buddy_free_list(&mm, &allocated, 0);
+ } while (++order <= max_order);
+
+ drm_buddy_fini(&mm);
+
+ /*
+ * Create a new mm with a non-power-of-two size. Allocate a random size, free it
+ * as cleared and then call fini. This exercises the multi-root force merge during
+ * fini.
+ */
+ mm_size = 12 * SZ_4K;
+ size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ size, ps, &allocated,
+ DRM_BUDDY_TOPDOWN_ALLOCATION),
+ "buddy_alloc hit an error size=%u\n", size);
+ drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
drm_buddy_fini(&mm);
}
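A condensed sketch of the clear-tracking API the new test exercises (hypothetical standalone usage, error handling omitted):

	struct drm_buddy mm;
	LIST_HEAD(blocks);
	u64 mm_size = SZ_4K << 12;

	drm_buddy_init(&mm, mm_size, SZ_4K);

	/* Request blocks that are guaranteed to be zeroed. */
	drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_4K,
			       &blocks, DRM_BUDDY_CLEAR_ALLOCATION);

	/* Return them while recording that they are still zeroed. */
	drm_buddy_free_list(&mm, &blocks, DRM_BUDDY_CLEARED);

	drm_buddy_fini(&mm);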
@@ -269,7 +411,7 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc didn't error size=%lu\n", 3 * ps);
- drm_buddy_free_list(&mm, &middle);
+ drm_buddy_free_list(&mm, &middle, 0);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
@@ -279,7 +421,7 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc didn't error size=%lu\n", 2 * ps);
- drm_buddy_free_list(&mm, &right);
+ drm_buddy_free_list(&mm, &right, 0);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
@@ -294,7 +436,7 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
"buddy_alloc hit an error size=%lu\n", 2 * ps);
- drm_buddy_free_list(&mm, &left);
+ drm_buddy_free_list(&mm, &left, 0);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
@@ -306,7 +448,7 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);
- drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
}
@@ -375,7 +517,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
top, max_order);
}
- drm_buddy_free_list(&mm, &holes);
+ drm_buddy_free_list(&mm, &holes, 0);
/* Nothing larger than blocks of chunk_size now available */
for (order = 1; order <= max_order; order++) {
@@ -387,7 +529,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
list_splice_tail(&holes, &blocks);
- drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_free_list(&mm, &blocks, 0);
drm_buddy_fini(&mm);
}
@@ -482,7 +624,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
list_del(&block->link);
drm_buddy_free_block(&mm, block);
- drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_free_list(&mm, &blocks, 0);
drm_buddy_fini(&mm);
}
@@ -528,7 +670,7 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
- drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_free_list(&mm, &blocks, 0);
drm_buddy_fini(&mm);
}
@@ -563,7 +705,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_block_size(&mm, block),
BIT_ULL(mm.max_order) * PAGE_SIZE);
- drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
}
@@ -584,6 +726,7 @@ static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
KUNIT_CASE(drm_test_buddy_alloc_pathological),
KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+ KUNIT_CASE(drm_test_buddy_alloc_clear),
KUNIT_CASE(drm_test_buddy_alloc_range_bias),
{}
};
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index a0e494c806a9..f371518f8697 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -135,8 +135,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
dev_dbg(dev, "no panel/bridge for port %d\n", i);
continue;
} else if (ret) {
- dev_dbg(dev, "port %d probe returned %d\n", i, ret);
- return ret;
+ return dev_err_probe(dev, ret, "port %d probe failed\n", i);
}
if (panel) {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 9aefd010acde..68093d6b6b16 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -6,7 +6,6 @@
#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <video/display_timing.h>
@@ -308,7 +307,6 @@ static int panel_probe(struct platform_device *pdev)
struct backlight_device *backlight;
struct panel_module *panel_mod;
struct tilcdc_module *mod;
- struct pinctrl *pinctrl;
int ret;
/* bail out early if no DT data: */
@@ -342,10 +340,6 @@ static int panel_probe(struct platform_device *pdev)
tilcdc_module_init(mod, "panel", &panel_module_ops);
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&pdev->dev, "pins are not configured\n");
-
panel_mod->timings = of_get_display_timings(node);
if (!panel_mod->timings) {
dev_err(&pdev->dev, "could not get panel timings\n");
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 7ce1c4617675..1d8fa07572c5 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
#define DRIVER_NAME "simpledrm"
@@ -671,11 +672,26 @@ static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plan
drm_dev_exit(idx);
}
+static int simpledrm_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(plane->dev);
+
+ sb->width = sdev->mode.hdisplay;
+ sb->height = sdev->mode.vdisplay;
+ sb->format = sdev->format;
+ sb->pitch[0] = sdev->pitch;
+ sb->map[0] = sdev->screen_base;
+
+ return 0;
+}
+
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = simpledrm_primary_plane_helper_atomic_check,
.atomic_update = simpledrm_primary_plane_helper_atomic_update,
.atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
+ .get_scanout_buffer = simpledrm_primary_plane_helper_get_scanout_buffer,
};
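Design note: with .get_scanout_buffer wired up, the drm_panic core can draw a panic message directly into simpledrm's firmware-provided framebuffer, which is CPU-mapped and therefore safe to write from the panic path. The returned struct drm_scanout_buffer simply describes that existing mapping (dimensions, format, pitch and iosys-map).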
static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 96a724e8f3ff..6396dece0db1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -402,7 +402,6 @@ void ttm_bo_put(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_put);
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
- struct ttm_resource **mem,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
@@ -469,7 +468,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
if (ret != -EMULTIHOP)
break;
- ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
+ ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
} while (!ret);
if (ret) {
@@ -698,7 +697,6 @@ EXPORT_SYMBOL(ttm_bo_unpin);
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man,
- struct ttm_resource *mem,
bool no_wait_gpu)
{
struct dma_fence *fence;
@@ -724,64 +722,36 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
return ret;
}
-/*
- * Repeatedly evict memory from the LRU for @mem_type until we create enough
- * space, or we've evicted everything and there isn't enough space.
- */
-static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx)
-{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
- struct ww_acquire_ctx *ticket;
- int ret;
-
- man = ttm_manager_type(bdev, place->mem_type);
- ticket = dma_resv_locking_ctx(bo->base.resv);
- do {
- ret = ttm_resource_alloc(bo, place, mem);
- if (likely(!ret))
- break;
- if (unlikely(ret != -ENOSPC))
- return ret;
- ret = ttm_mem_evict_first(bdev, man, place, ctx,
- ticket);
- if (unlikely(ret != 0))
- return ret;
- } while (1);
-
- return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
-}
-
/**
- * ttm_bo_mem_space
+ * ttm_bo_alloc_resource - Allocate backing store for a BO
*
- * @bo: Pointer to a struct ttm_buffer_object. the data of which
- * we want to allocate space for.
- * @placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_resource.
+ * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
+ * @placement: Proposed new placement for the buffer object
* @ctx: if and how to sleep, lock buffers and alloc memory
+ * @force_space: If we should evict buffers to force space
+ * @res: The resulting struct ttm_resource.
*
- * Allocate memory space for the buffer object pointed to by @bo, using
- * the placement flags in @placement, potentially evicting other idle buffer objects.
- * This function may sleep while waiting for space to become available.
+ * Allocates a resource for the buffer object pointed to by @bo, using the
+ * placement flags in @placement, potentially evicting other buffer objects when
+ * @force_space is true.
+ * This function may sleep while waiting for resources to become available.
* Returns:
- * -EBUSY: No space available (only if no_wait == 1).
+ * -EBUSY: No space available (only if no_wait == true).
* -ENOSPC: Could not allocate space for the buffer object, either due to
* fragmentation or concurrent allocators.
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
-int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx)
+static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx,
+ bool force_space,
+ struct ttm_resource **res)
{
struct ttm_device *bdev = bo->bdev;
- bool type_found = false;
+ struct ww_acquire_ctx *ticket;
int i, ret;
+ ticket = dma_resv_locking_ctx(bo->base.resv);
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret))
return ret;
@@ -790,98 +760,73 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
- if (place->flags & TTM_PL_FLAG_FALLBACK)
- continue;
-
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
continue;
- type_found = true;
- ret = ttm_resource_alloc(bo, place, mem);
- if (ret == -ENOSPC)
+ if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
+ TTM_PL_FLAG_FALLBACK))
+ continue;
+
+ do {
+ ret = ttm_resource_alloc(bo, place, res);
+ if (unlikely(ret && ret != -ENOSPC))
+ return ret;
+ if (likely(!ret) || !force_space)
+ break;
+
+ ret = ttm_mem_evict_first(bdev, man, place, ctx,
+ ticket);
+ if (unlikely(ret == -EBUSY))
+ break;
+ if (unlikely(ret))
+ return ret;
+ } while (1);
+ if (ret)
continue;
- if (unlikely(ret))
- goto error;
- ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
+ ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
if (unlikely(ret)) {
- ttm_resource_free(bo, mem);
+ ttm_resource_free(bo, res);
if (ret == -EBUSY)
continue;
- goto error;
+ return ret;
}
return 0;
}
- for (i = 0; i < placement->num_placement; ++i) {
- const struct ttm_place *place = &placement->placement[i];
- struct ttm_resource_manager *man;
-
- if (place->flags & TTM_PL_FLAG_DESIRED)
- continue;
-
- man = ttm_manager_type(bdev, place->mem_type);
- if (!man || !ttm_resource_manager_used(man))
- continue;
-
- type_found = true;
- ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
- if (likely(!ret))
- return 0;
-
- if (ret && ret != -EBUSY)
- goto error;
- }
-
- ret = -ENOSPC;
- if (!type_found) {
- pr_err(TTM_PFX "No compatible memory type found\n");
- ret = -EINVAL;
- }
-
-error:
- return ret;
+ return -ENOSPC;
}
-EXPORT_SYMBOL(ttm_bo_mem_space);
-static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_operation_ctx *ctx)
+/*
+ * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
+ *
+ * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
+ * @placement: Proposed new placement for the buffer object
+ * @res: The resulting struct ttm_resource.
+ * @ctx: if and how to sleep, lock buffers and alloc memory
+ *
+ * Tries both idle allocation and forceful eviction of buffers. See
+ * ttm_bo_alloc_resource for details.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_resource **res,
+ struct ttm_operation_ctx *ctx)
{
- struct ttm_resource *mem;
- struct ttm_place hop;
+ bool force_space = false;
int ret;
- dma_resv_assert_held(bo->base.resv);
+ do {
+ ret = ttm_bo_alloc_resource(bo, placement, ctx,
+ force_space, res);
+ force_space = !force_space;
+ } while (ret == -ENOSPC && force_space);
- /*
- * Determine where to move the buffer.
- *
- * If driver determines move is going to need
- * an extra step then it will return -EMULTIHOP
- * and the buffer will be moved to the temporary
- * stop and the driver will be called to make
- * the second hop.
- */
- ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
- if (ret)
- return ret;
-bounce:
- ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
- if (ret == -EMULTIHOP) {
- ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
- if (ret)
- goto out;
- /* try and move to final place now. */
- goto bounce;
- }
-out:
- if (ret)
- ttm_resource_free(bo, &mem);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_mem_space);
/**
* ttm_bo_validate
@@ -902,6 +847,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_resource *res;
+ struct ttm_place hop;
+ bool force_space;
int ret;
dma_resv_assert_held(bo->base.resv);
@@ -912,20 +860,53 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
if (!placement->num_placement)
return ttm_bo_pipeline_gutting(bo);
- /* Check whether we need to move buffer. */
- if (bo->resource && ttm_resource_compatible(bo->resource, placement))
- return 0;
+ force_space = false;
+ do {
+ /* Check whether we need to move buffer. */
+ if (bo->resource &&
+ ttm_resource_compatible(bo->resource, placement,
+ force_space))
+ return 0;
- /* Moving of pinned BOs is forbidden */
- if (bo->pin_count)
- return -EINVAL;
+ /* Moving of pinned BOs is forbidden */
+ if (bo->pin_count)
+ return -EINVAL;
+
+ /*
+ * Determine where to move the buffer.
+ *
+ * If driver determines move is going to need
+ * an extra step then it will return -EMULTIHOP
+ * and the buffer will be moved to the temporary
+ * stop and the driver will be called to make
+ * the second hop.
+ */
+ ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
+ &res);
+ force_space = !force_space;
+ if (ret == -ENOSPC)
+ continue;
+ if (ret)
+ return ret;
+
+bounce:
+ ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
+ if (ret == -EMULTIHOP) {
+ ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
+ /* try and move to final place now. */
+ if (!ret)
+ goto bounce;
+ }
+ if (ret) {
+ ttm_resource_free(bo, &res);
+ return ret;
+ }
+
+ } while (ret && force_space);
- ret = ttm_bo_move_buffer(bo, placement, ctx);
/* For backward compatibility with userspace */
if (ret == -ENOSPC)
return -ENOMEM;
- if (ret)
- return ret;
/*
* We might need to add a TTM.
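A hypothetical caller of the reworked ttm_bo_mem_space() above (sketch, not from the patch): the resource is now returned through @res, and forced eviction is retried internally on -ENOSPC.

	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct ttm_resource *res;
	int ret;

	ret = ttm_bo_mem_space(bo, placement, &res, &ctx);
	if (ret)
		return ret;	/* -ENOSPC even after forced eviction */
	/* ... move the BO, or release with ttm_resource_free(bo, &res) */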
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 76027960054f..434cf0258000 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -27,6 +27,7 @@
#define pr_fmt(fmt) "[TTM DEVICE] " fmt
+#include <linux/debugfs.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo.h>
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 112438d965ff..6e1fd6985ffc 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+ if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_uncached[order];
@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
- if (use_dma_alloc || nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_init(&pool->caching[i].orders[j],
- pool, i, j);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ /* Initialize only pool types which are actually used */
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_init(pt, pool, i, j);
+ }
}
}
EXPORT_SYMBOL(ttm_pool_init);
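With this change a pool only owns the types that ttm_pool_select_type() can actually return for it; everything else falls through to the global pools. A hypothetical system-memory pool on the default node (sketch):

	struct ttm_pool pool;

	/* No coherent DMA pages, no DMA32 restriction, default node. */
	ttm_pool_init(&pool, dev, NUMA_NO_NODE, false, false);
	/* ... allocate and free ttm_tt pages ... */
	ttm_pool_fini(&pool);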
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_fini(pt);
+ }
}
/* We removed the pool types from the LRU, but we need to also make sure
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index fb14f7716cf8..4a66b851b67d 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -22,8 +22,9 @@
* Authors: Christian König
*/
-#include <linux/iosys-map.h>
+#include <linux/debugfs.h>
#include <linux/io-mapping.h>
+#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
#include <drm/ttm/ttm_bo.h>
@@ -105,6 +106,7 @@ static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
pos->first = res;
pos->last = res;
} else {
+ WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
ttm_lru_bulk_move_pos_tail(pos, res);
}
}
@@ -295,11 +297,13 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
*
* @res: the resource to check
* @placement: the placement to check against
+ * @evicting: true if the caller is doing evictions
*
* Returns true if the placement is compatible.
*/
bool ttm_resource_compatible(struct ttm_resource *res,
- struct ttm_placement *placement)
+ struct ttm_placement *placement,
+ bool evicting)
{
struct ttm_buffer_object *bo = res->bo;
struct ttm_device *bdev = bo->bdev;
@@ -315,14 +319,20 @@ bool ttm_resource_compatible(struct ttm_resource *res,
if (res->mem_type != place->mem_type)
continue;
+ if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
+ TTM_PL_FLAG_FALLBACK))
+ continue;
+
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
+ !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
+ continue;
+
man = ttm_manager_type(bdev, res->mem_type);
if (man->func->compatible &&
!man->func->compatible(man, res, place, bo->base.size))
continue;
- if ((!(place->flags & TTM_PL_FLAG_CONTIGUOUS) ||
- (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
- return true;
+ return true;
}
return false;
}
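Worked example of the new @evicting flag (illustrative, not from the patch): for a BO with placements {VRAM | TTM_PL_FLAG_DESIRED, GTT | TTM_PL_FLAG_FALLBACK}, the non-evicting pass treats only the DESIRED VRAM placement as satisfying, so a BO sitting in fallback GTT gets moved back. On the evicting pass the DESIRED entry is skipped and GTT is accepted, mirroring the two-pass loop in ttm_bo_alloc_resource().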
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 578a7c37f00b..474fe7aad2a0 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -32,10 +32,11 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/cc_platform.h>
-#include <linux/sched.h>
-#include <linux/shmem_fs.h>
+#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/shmem_fs.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 3debf37e7d9b..28b7ddce7747 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -115,14 +115,13 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- v3d_priv->enabled_ns[i] = 0;
- v3d_priv->start_ns[i] = 0;
- v3d_priv->jobs_sent[i] = 0;
-
sched = &v3d->queue[i].sched;
drm_sched_entity_init(&v3d_priv->sched_entity[i],
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
+
+ memset(&v3d_priv->stats[i], 0, sizeof(v3d_priv->stats[i]));
+ seqcount_init(&v3d_priv->stats[i].lock);
}
v3d_perfmon_open_file(v3d_priv);
@@ -144,6 +143,20 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
+void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp,
+ u64 *active_runtime, u64 *jobs_completed)
+{
+ unsigned int seq;
+
+ do {
+ seq = read_seqcount_begin(&stats->lock);
+ *active_runtime = stats->enabled_ns;
+ if (stats->start_ns)
+ *active_runtime += timestamp - stats->start_ns;
+ *jobs_completed = stats->jobs_completed;
+ } while (read_seqcount_retry(&stats->lock, seq));
+}
+
static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
struct v3d_file_priv *file_priv = file->driver_priv;
@@ -151,20 +164,22 @@ static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file)
enum v3d_queue queue;
for (queue = 0; queue < V3D_MAX_QUEUES; queue++) {
+ struct v3d_stats *stats = &file_priv->stats[queue];
+ u64 active_runtime, jobs_completed;
+
+ v3d_get_stats(stats, timestamp, &active_runtime, &jobs_completed);
+
/* Note that, in case of a GPU reset, the time spent during an
* attempt to execute the job is not counted in the runtime.
*/
drm_printf(p, "drm-engine-%s: \t%llu ns\n",
- v3d_queue_to_string(queue),
- file_priv->start_ns[queue] ? file_priv->enabled_ns[queue]
- + timestamp - file_priv->start_ns[queue]
- : file_priv->enabled_ns[queue]);
+ v3d_queue_to_string(queue), active_runtime);
/* Note that we only count jobs that completed. Therefore, jobs
* that were resubmitted due to a GPU reset are not counted.
*/
drm_printf(p, "v3d-jobs-%s: \t%llu jobs\n",
- v3d_queue_to_string(queue), file_priv->jobs_sent[queue]);
+ v3d_queue_to_string(queue), jobs_completed);
}
}
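Given the two format strings above, each queue contributes a pair of fdinfo lines; an illustrative excerpt with made-up values:

	drm-engine-bin: 	128000 ns
	v3d-jobs-bin: 	42 jobs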
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 1950c723dde1..a2c516fe6d79 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -36,15 +36,27 @@ static inline char *v3d_queue_to_string(enum v3d_queue queue)
return "UNKNOWN";
}
+struct v3d_stats {
+ u64 start_ns;
+ u64 enabled_ns;
+ u64 jobs_completed;
+
+ /*
+ * This seqcount protects access to the GPU stats variables. It is
+ * needed because, while we are reading the stats, IRQs can fire
+ * and update the stats underneath us.
+ */
+ seqcount_t lock;
+};
+
struct v3d_queue_state {
struct drm_gpu_scheduler sched;
u64 fence_context;
u64 emit_seqno;
- u64 start_ns;
- u64 enabled_ns;
- u64 jobs_sent;
+ /* Stores the GPU stats for this queue in the global context. */
+ struct v3d_stats stats;
};
/* Performance monitor object. The perfmon lifetime is controlled by userspace
@@ -188,11 +200,8 @@ struct v3d_file_priv {
struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
- u64 start_ns[V3D_MAX_QUEUES];
-
- u64 enabled_ns[V3D_MAX_QUEUES];
-
- u64 jobs_sent[V3D_MAX_QUEUES];
+ /* Stores the GPU stats for a specific queue for this fd. */
+ struct v3d_stats stats[V3D_MAX_QUEUES];
};
struct v3d_bo {
@@ -508,6 +517,10 @@ struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
/* v3d_debugfs.c */
void v3d_debugfs_init(struct drm_minor *minor);
+/* v3d_drv.c */
+void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp,
+ u64 *active_runtime, u64 *jobs_completed);
+
/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
@@ -543,6 +556,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);
/* v3d_sched.c */
+void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index afc565078c78..da8faf3b9011 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -247,10 +247,11 @@ v3d_gem_init(struct drm_device *dev)
int ret, i;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- v3d->queue[i].fence_context = dma_fence_context_alloc(1);
- v3d->queue[i].start_ns = 0;
- v3d->queue[i].enabled_ns = 0;
- v3d->queue[i].jobs_sent = 0;
+ struct v3d_queue_state *queue = &v3d->queue[i];
+
+ queue->fence_context = dma_fence_context_alloc(1);
+ memset(&queue->stats, 0, sizeof(queue->stats));
+ seqcount_init(&queue->stats.lock);
}
spin_lock_init(&v3d->mm_lock);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 2e04f6cb661e..d469bda52c1a 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -102,19 +102,8 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FLDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->bin_job->base.irq_fence);
- struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
- u64 runtime = local_clock() - file->start_ns[V3D_BIN];
-
- file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
- file->jobs_sent[V3D_BIN]++;
- v3d->queue[V3D_BIN].jobs_sent++;
-
- file->start_ns[V3D_BIN] = 0;
- v3d->queue[V3D_BIN].start_ns = 0;
-
- file->enabled_ns[V3D_BIN] += runtime;
- v3d->queue[V3D_BIN].enabled_ns += runtime;
+ v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
@@ -123,19 +112,8 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FRDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->render_job->base.irq_fence);
- struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
- u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
-
- file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
- file->jobs_sent[V3D_RENDER]++;
- v3d->queue[V3D_RENDER].jobs_sent++;
-
- file->start_ns[V3D_RENDER] = 0;
- v3d->queue[V3D_RENDER].start_ns = 0;
-
- file->enabled_ns[V3D_RENDER] += runtime;
- v3d->queue[V3D_RENDER].enabled_ns += runtime;
+ v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
@@ -144,19 +122,8 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_CSDDONE(v3d->ver)) {
struct v3d_fence *fence =
to_v3d_fence(v3d->csd_job->base.irq_fence);
- struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
- u64 runtime = local_clock() - file->start_ns[V3D_CSD];
-
- file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
- file->jobs_sent[V3D_CSD]++;
- v3d->queue[V3D_CSD].jobs_sent++;
-
- file->start_ns[V3D_CSD] = 0;
- v3d->queue[V3D_CSD].start_ns = 0;
-
- file->enabled_ns[V3D_CSD] += runtime;
- v3d->queue[V3D_CSD].enabled_ns += runtime;
+ v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
@@ -192,19 +159,8 @@ v3d_hub_irq(int irq, void *arg)
if (intsts & V3D_HUB_INT_TFUC) {
struct v3d_fence *fence =
to_v3d_fence(v3d->tfu_job->base.irq_fence);
- struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
- u64 runtime = local_clock() - file->start_ns[V3D_TFU];
-
- file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
- file->jobs_sent[V3D_TFU]++;
- v3d->queue[V3D_TFU].jobs_sent++;
-
- file->start_ns[V3D_TFU] = 0;
- v3d->queue[V3D_TFU].start_ns = 0;
-
- file->enabled_ns[V3D_TFU] += runtime;
- v3d->queue[V3D_TFU].enabled_ns += runtime;
+ v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 54015ad765c7..7cd8c335cd9b 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -105,11 +105,51 @@ v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
v3d_perfmon_start(v3d, job->perfmon);
}
+static void
+v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
+{
+ struct v3d_dev *v3d = job->v3d;
+ struct v3d_file_priv *file = job->file->driver_priv;
+ struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+ struct v3d_stats *local_stats = &file->stats[queue];
+ u64 now = local_clock();
+
+ write_seqcount_begin(&local_stats->lock);
+ local_stats->start_ns = now;
+ write_seqcount_end(&local_stats->lock);
+
+ write_seqcount_begin(&global_stats->lock);
+ global_stats->start_ns = now;
+ write_seqcount_end(&global_stats->lock);
+}
+
+static void
+v3d_stats_update(struct v3d_stats *stats, u64 now)
+{
+ write_seqcount_begin(&stats->lock);
+ stats->enabled_ns += now - stats->start_ns;
+ stats->jobs_completed++;
+ stats->start_ns = 0;
+ write_seqcount_end(&stats->lock);
+}
+
+void
+v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
+{
+ struct v3d_dev *v3d = job->v3d;
+ struct v3d_file_priv *file = job->file->driver_priv;
+ struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+ struct v3d_stats *local_stats = &file->stats[queue];
+ u64 now = local_clock();
+
+ v3d_stats_update(local_stats, now);
+ v3d_stats_update(global_stats, now);
+}
+
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
unsigned long irqflags;
@@ -141,9 +181,7 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
job->start, job->end);
- file->start_ns[V3D_BIN] = local_clock();
- v3d->queue[V3D_BIN].start_ns = file->start_ns[V3D_BIN];
-
+ v3d_job_start_stats(&job->base, V3D_BIN);
v3d_switch_perfmon(v3d, &job->base);
/* Set the current and end address of the control list.
@@ -168,7 +206,6 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
struct v3d_render_job *job = to_render_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
@@ -196,9 +233,7 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
job->start, job->end);
- file->start_ns[V3D_RENDER] = local_clock();
- v3d->queue[V3D_RENDER].start_ns = file->start_ns[V3D_RENDER];
-
+ v3d_job_start_stats(&job->base, V3D_RENDER);
v3d_switch_perfmon(v3d, &job->base);
/* XXX: Set the QCFG */
@@ -217,7 +252,6 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
struct v3d_tfu_job *job = to_tfu_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
@@ -232,8 +266,7 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
- file->start_ns[V3D_TFU] = local_clock();
- v3d->queue[V3D_TFU].start_ns = file->start_ns[V3D_TFU];
+ v3d_job_start_stats(&job->base, V3D_TFU);
V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia);
V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis);
@@ -260,7 +293,6 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
int i, csd_cfg0_reg, csd_cfg_reg_count;
@@ -279,9 +311,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);
- file->start_ns[V3D_CSD] = local_clock();
- v3d->queue[V3D_CSD].start_ns = file->start_ns[V3D_CSD];
-
+ v3d_job_start_stats(&job->base, V3D_CSD);
v3d_switch_perfmon(v3d, &job->base);
csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
@@ -530,8 +560,6 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
{
struct v3d_cpu_job *job = to_cpu_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_file_priv *file = job->base.file->driver_priv;
- u64 runtime;
v3d->cpu_job = job;
@@ -540,25 +568,13 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
return NULL;
}
- file->start_ns[V3D_CPU] = local_clock();
- v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU];
-
+ v3d_job_start_stats(&job->base, V3D_CPU);
trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);
cpu_job_function[job->job_type](job);
trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
-
- runtime = local_clock() - file->start_ns[V3D_CPU];
-
- file->enabled_ns[V3D_CPU] += runtime;
- v3d->queue[V3D_CPU].enabled_ns += runtime;
-
- file->jobs_sent[V3D_CPU]++;
- v3d->queue[V3D_CPU].jobs_sent++;
-
- file->start_ns[V3D_CPU] = 0;
- v3d->queue[V3D_CPU].start_ns = 0;
+ v3d_job_update_stats(&job->base, V3D_CPU);
return NULL;
}
@@ -568,24 +584,12 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
struct v3d_dev *v3d = job->v3d;
- struct v3d_file_priv *file = job->file->driver_priv;
- u64 runtime;
- file->start_ns[V3D_CACHE_CLEAN] = local_clock();
- v3d->queue[V3D_CACHE_CLEAN].start_ns = file->start_ns[V3D_CACHE_CLEAN];
+ v3d_job_start_stats(job, V3D_CACHE_CLEAN);
v3d_clean_caches(v3d);
- runtime = local_clock() - file->start_ns[V3D_CACHE_CLEAN];
-
- file->enabled_ns[V3D_CACHE_CLEAN] += runtime;
- v3d->queue[V3D_CACHE_CLEAN].enabled_ns += runtime;
-
- file->jobs_sent[V3D_CACHE_CLEAN]++;
- v3d->queue[V3D_CACHE_CLEAN].jobs_sent++;
-
- file->start_ns[V3D_CACHE_CLEAN] = 0;
- v3d->queue[V3D_CACHE_CLEAN].start_ns = 0;
+ v3d_job_update_stats(job, V3D_CACHE_CLEAN);
return NULL;
}
diff --git a/drivers/gpu/drm/v3d/v3d_sysfs.c b/drivers/gpu/drm/v3d/v3d_sysfs.c
index d106845ba890..d610e355964f 100644
--- a/drivers/gpu/drm/v3d/v3d_sysfs.c
+++ b/drivers/gpu/drm/v3d/v3d_sysfs.c
@@ -15,16 +15,15 @@ gpu_stats_show(struct device *dev, struct device_attribute *attr, char *buf)
struct v3d_dev *v3d = to_v3d_dev(drm);
enum v3d_queue queue;
u64 timestamp = local_clock();
- u64 active_runtime;
ssize_t len = 0;
len += sysfs_emit(buf, "queue\ttimestamp\tjobs\truntime\n");
for (queue = 0; queue < V3D_MAX_QUEUES; queue++) {
- if (v3d->queue[queue].start_ns)
- active_runtime = timestamp - v3d->queue[queue].start_ns;
- else
- active_runtime = 0;
+ struct v3d_stats *stats = &v3d->queue[queue].stats;
+ u64 active_runtime, jobs_completed;
+
+ v3d_get_stats(stats, timestamp, &active_runtime, &jobs_completed);
/* Each line will display the queue name, timestamp, the number
* of jobs sent to that queue and the runtime, as can be seen here:
@@ -38,9 +37,7 @@ gpu_stats_show(struct device *dev, struct device_attribute *attr, char *buf)
*/
len += sysfs_emit_at(buf, len, "%s\t%llu\t%llu\t%llu\n",
v3d_queue_to_string(queue),
- timestamp,
- v3d->queue[queue].jobs_sent,
- v3d->queue[queue].enabled_ns + active_runtime);
+ timestamp, jobs_completed, active_runtime);
}
return len;
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 91dcf8d174d6..4801f8b64d3d 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -2,15 +2,15 @@
config DRM_VC4
tristate "Broadcom VC4 Graphics"
depends on ARCH_BCM || ARCH_BCM2835 || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on DRM
+ depends on DRM_DISPLAY_HDMI_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on PM
# Make sure not 'y' when RASPBERRYPI_FIRMWARE is 'm'. This can only
# happen when COMPILE_TEST=y, hence the added !RASPBERRYPI_FIRMWARE.
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
- depends on DRM
depends on SND && SND_SOC
- depends on COMMON_CLK
- depends on PM
- select DRM_DISPLAY_HDMI_HELPER
- select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index ab61e96e7e14..08e29fa82563 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -5,6 +5,7 @@
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/refcount.h>
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index d8751ea20303..d30f8e8e8967 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -412,15 +412,14 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
enum drm_connector_status status)
{
struct drm_connector *connector = &vc4_hdmi->connector;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
/*
- * NOTE: This function should really be called with
- * vc4_hdmi->mutex held, but doing so results in reentrancy
- * issues since cec_s_phys_addr_from_edid might call
- * .adap_enable, which leads to that funtion being called with
- * our mutex held.
+ * NOTE: This function should really be called with vc4_hdmi->mutex
+ * held, but doing so results in reentrancy issues since
+ * cec_s_phys_addr() might call .adap_enable, which leads to that
+ * function being called with our mutex held.
*
* A similar situation occurs with vc4_hdmi_reset_link() that
* will call into our KMS hooks if the scrambling was enabled.
@@ -435,12 +434,16 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
return;
}
- edid = drm_get_edid(connector, vc4_hdmi->ddc);
- if (!edid)
+ drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
+
+ drm_edid_connector_update(connector, drm_edid);
+ cec_s_phys_addr(vc4_hdmi->cec_adap,
+ connector->display_info.source_physical_address, false);
+
+ if (!drm_edid)
return;
- cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- kfree(edid);
+ drm_edid_free(drm_edid);
for (;;) {
ret = vc4_hdmi_reset_link(connector, ctx);
@@ -492,28 +495,29 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
+ const struct drm_edid *drm_edid;
int ret = 0;
- struct edid *edid;
/*
- * NOTE: This function should really take vc4_hdmi->mutex, but
- * doing so results in reentrancy issues since
- * cec_s_phys_addr_from_edid might call .adap_enable, which
- * leads to that funtion being called with our mutex held.
+ * NOTE: This function should really take vc4_hdmi->mutex, but doing so
+ * results in reentrancy issues since cec_s_phys_addr() might call
+ * .adap_enable, which leads to that function being called with our mutex
+ * held.
*
* Concurrency isn't an issue at the moment since we don't share
* any state with any of the other frameworks so we can ignore
* the lock for now.
*/
- edid = drm_get_edid(connector, vc4_hdmi->ddc);
- cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- if (!edid)
+ drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
+ drm_edid_connector_update(connector, drm_edid);
+ cec_s_phys_addr(vc4_hdmi->cec_adap,
+ connector->display_info.source_physical_address, false);
+ if (!drm_edid)
return 0;
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
if (!vc4->hvs->vc5_hdmi_enable_hdmi_20) {
struct drm_device *drm = connector->dev;
@@ -2740,6 +2744,8 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
index = 1;
addr = of_get_address(dev->of_node, index, NULL, NULL);
+ if (!addr)
+ return -EINVAL;
vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset;
vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
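The vc4 hunks above migrate from the raw struct edid API (drm_get_edid()/kfree()) to the opaque const struct drm_edid one. A condensed, hedged sketch of the same .get_modes pattern in isolation — my_connector_to_ddc() is a hypothetical accessor, everything else is the DRM core API used above:

    static int my_connector_get_modes(struct drm_connector *connector)
    {
    	const struct drm_edid *drm_edid;
    	int count;

    	drm_edid = drm_edid_read_ddc(connector, my_connector_to_ddc(connector));

    	/* Update display_info and the EDID property even for NULL, so
    	 * stale data is cleared on disconnect. */
    	drm_edid_connector_update(connector, drm_edid);
    	if (!drm_edid)
    		return 0;

    	count = drm_edid_connector_add_modes(connector);
    	drm_edid_free(drm_edid);
    	return count;
    }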
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 61e500b8c9da..40b4d084e3ce 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -61,9 +61,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
drm_calc_timestamping_constants(crtc, &crtc->mode);
@@ -88,10 +86,9 @@ static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
bool in_vblank_irq)
{
struct drm_device *dev = crtc->dev;
- unsigned int pipe = crtc->index;
struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
struct vkms_output *output = &vkmsdev->output;
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
if (!READ_ONCE(vblank->enabled)) {
*vblank_time = ktime_get();
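The vkms change relies on the drm_crtc_vblank_crtc() accessor introduced alongside this series; it simply replaces the open-coded per-pipe lookup, as this before/after sketch shows:

    /* before: */ struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
    /* after:  */ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);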
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index e94479d9cd5b..46a4ab688a7f 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
- vmwgfx_gem.o
+ vmwgfx_gem.o vmwgfx_vkms.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 6806c05e57f6..3353e97687d1 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -87,14 +87,11 @@ struct ttm_object_file {
*
* @object_lock: lock that protects idr.
*
- * @object_count: Per device object count.
- *
* This is the per-device data structure needed for ttm object management.
*/
struct ttm_object_device {
spinlock_t object_lock;
- atomic_t object_count;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
struct idr idr;
@@ -431,7 +428,6 @@ ttm_object_device_init(const struct dma_buf_ops *ops)
return NULL;
spin_lock_init(&tdev->object_lock);
- atomic_set(&tdev->object_count, 0);
/*
* Our base is at VMWGFX_NUM_MOB + 1 because we want to create
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index c52c7bf1485b..717d624e9a05 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
.no_wait_gpu = false
};
u32 j, initial_line = dst_offset / dst_stride;
- struct vmw_bo_blit_line_data d;
+ struct vmw_bo_blit_line_data d = {0};
int ret = 0;
+ struct page **dst_pages = NULL;
+ struct page **src_pages = NULL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}
+ if (!src->ttm->pages && src->ttm->sg) {
+ src_pages = kvmalloc_array(src->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!src_pages)
+ return -ENOMEM;
+ ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+ src->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+ if (!dst->ttm->pages && dst->ttm->sg) {
+ dst_pages = kvmalloc_array(dst->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!dst_pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+ dst->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+
d.mapped_dst = 0;
d.mapped_src = 0;
d.dst_addr = NULL;
d.src_addr = NULL;
- d.dst_pages = dst->ttm->pages;
- d.src_pages = src->ttm->pages;
+ d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+ d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
+ if (src_pages)
+ kvfree(src_pages);
+ if (dst_pages)
+ kvfree(dst_pages);
return ret;
}
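For buffers imported via dma-buf, ttm->pages can be NULL while ttm->sg holds the backing scatter list, so the blit path above materializes a temporary page array. A hedged, standalone sketch of that fallback (pages_from_sg() is an illustrative helper, not a vmwgfx function):

    /* Build a struct page * array from an imported sg_table so CPU-side
     * code that expects a page array can run; the caller kvfree()s it. */
    static struct page **pages_from_sg(struct sg_table *sg, u32 num_pages)
    {
    	struct page **pages;
    	int ret;

    	pages = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
    	if (!pages)
    		return ERR_PTR(-ENOMEM);

    	ret = drm_prime_sg_to_page_array(sg, pages, num_pages);
    	if (ret) {
    		kvfree(pages);
    		return ERR_PTR(ret);
    	}
    	return pages;
    }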
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index bfd41ce3c8f4..e5eb21a471a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
- .no_wait_gpu = false
+ .no_wait_gpu = false,
+ .resv = params->resv,
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
- &vmw_bo->placement, 0, &ctx, NULL,
- NULL, destroy);
+ &vmw_bo->placement, 0, &ctx,
+ params->sg, params->resv, destroy);
if (unlikely(ret))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 0d496dc9c6af..f349642e6190 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -55,6 +55,8 @@ struct vmw_bo_params {
enum ttm_bo_type bo_type;
size_t size;
bool pin;
+ struct dma_resv *resv;
+ struct sg_table *sg;
};
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index d3e308fdfd5b..bdad93864b98 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,6 +32,7 @@
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
+#include "vmwgfx_vkms.h"
#include "ttm_object.h"
#include <drm/drm_aperture.h>
@@ -666,11 +667,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
- /* TTM currently doesn't fully support SEV encryption. */
- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
- return -EINVAL;
-
- if (vmw_force_coherent)
+ /*
+ * When running with SEV we always want dma mappings, because
+ * otherwise ttm tt pool pages will bounce through swiotlb and run
+ * out of available space.
+ */
+ if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
@@ -910,6 +912,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
"Please switch to a supported graphics device to avoid problems.");
}
+ vmw_vkms_init(dev_priv);
+
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
drm_info(&dev_priv->drm,
@@ -1195,6 +1199,7 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_svga_disable(dev_priv);
+ vmw_vkms_cleanup(dev_priv);
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
@@ -1444,12 +1449,15 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
root, "system_ttm");
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
root, "vram_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
- root, "gmr_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
- root, "mob_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
- root, "system_mob_ttm");
+ if (vmw->has_gmr)
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
+ root, "gmr_ttm");
+ if (vmw->has_mob) {
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
+ root, "mob_ttm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
+ root, "system_mob_ttm");
+ }
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -1624,6 +1632,7 @@ static const struct drm_driver driver = {
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
+ .gem_prime_import_sg_table = vmw_prime_import_sg_table,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 12efecc17df6..4ecaea0026fc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -117,25 +117,8 @@ struct vmwgfx_hash_item {
unsigned long key;
};
-
-/**
- * struct vmw_validate_buffer - Carries validation info about buffers.
- *
- * @base: Validation info for TTM.
- * @hash: Hash entry for quick lookup of the TTM buffer object.
- *
- * This structure contains also driver private validation info
- * on top of the info needed by TTM.
- */
-struct vmw_validate_buffer {
- struct ttm_validate_buffer base;
- struct vmwgfx_hash_item hash;
- bool validate_as_mob;
-};
-
struct vmw_res_func;
-
/**
* struct vmw-resource - base class for hardware resources
*
@@ -445,15 +428,6 @@ struct vmw_sw_context{
struct vmw_legacy_display;
struct vmw_overlay;
-struct vmw_vga_topology_state {
- uint32_t width;
- uint32_t height;
- uint32_t primary;
- uint32_t pos_x;
- uint32_t pos_y;
-};
-
-
/*
* struct vmw_otable - Guest Memory OBject table metadata
*
@@ -501,7 +475,6 @@ struct vmw_private {
struct drm_device drm;
struct ttm_device bdev;
- struct drm_vma_offset_manager vma_manager;
u32 pci_id;
resource_size_t io_start;
resource_size_t vram_start;
@@ -642,6 +615,9 @@ struct vmw_private {
uint32 *devcaps;
+ bool vkms_enabled;
+ struct workqueue_struct *crc_workq;
+
/*
* mksGuestStat instance-descriptor and pid arrays
*/
@@ -836,6 +812,7 @@ void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
pgoff_t end);
+int vmw_resource_clean(struct vmw_resource *res);
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault);
@@ -1130,6 +1107,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table);
/*
* MemoryOBject management - vmwgfx_mob.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 12787bb9c111..07185c108218 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -30,6 +30,8 @@
#include "drm/drm_prime.h"
#include "drm/drm_gem_ttm_helper.h"
+#include <linux/debugfs.h>
+
static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
@@ -48,33 +50,20 @@ static void vmw_gem_object_close(struct drm_gem_object *obj,
{
}
-static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
+static int vmw_gem_object_pin(struct drm_gem_object *obj)
{
- struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
struct vmw_bo *vbo = to_vmw_bo(obj);
- int ret;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- vmw_bo_pin_reserved(vbo, do_pin);
-
- ttm_bo_unreserve(bo);
-
-err:
- return ret;
-}
+ vmw_bo_pin_reserved(vbo, true);
-static int vmw_gem_object_pin(struct drm_gem_object *obj)
-{
- return vmw_gem_pin_private(obj, true);
+ return 0;
}
static void vmw_gem_object_unpin(struct drm_gem_object *obj)
{
- vmw_gem_pin_private(obj, false);
+ struct vmw_bo *vbo = to_vmw_bo(obj);
+
+ vmw_bo_pin_reserved(vbo, false);
}
static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
@@ -149,6 +138,38 @@ out_no_bo:
return ret;
}
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ int ret;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_gem_object *gem = NULL;
+ struct vmw_bo *vbo;
+ struct vmw_bo_params params = {
+ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_sg,
+ .size = attach->dmabuf->size,
+ .pin = false,
+ .resv = attach->dmabuf->resv,
+ .sg = table,
+
+ };
+
+ dma_resv_lock(params.resv, NULL);
+
+ ret = vmw_bo_create(dev_priv, &params, &vbo);
+ if (ret != 0)
+ goto out_no_bo;
+
+ vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+ gem = &vbo->tbo.base;
+out_no_bo:
+ dma_resv_unlock(params.resv);
+ return gem;
+}
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index cd4925346ed4..13b2820cae51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -27,6 +27,7 @@
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
+#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
#include <drm/drm_atomic.h>
@@ -37,9 +38,16 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_edid.h>
+void vmw_du_init(struct vmw_display_unit *du)
+{
+ vmw_vkms_crtc_init(&du->crtc);
+}
+
void vmw_du_cleanup(struct vmw_display_unit *du)
{
struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
+
+ vmw_vkms_crtc_cleanup(&du->crtc);
drm_plane_cleanup(&du->primary);
if (vmw_cmd_supported(dev_priv))
drm_plane_cleanup(&du->cursor.base);
@@ -775,7 +783,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_y = du->hotspot_y + new_state->hotspot_y;
du->cursor_surface = vps->surf;
- du->cursor_bo = vps->bo;
if (!vps->surf && !vps->bo) {
vmw_cursor_update_position(dev_priv, false, 0, 0);
@@ -858,15 +865,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, true);
-
- if (!ret && new_fb) {
- struct drm_crtc *crtc = new_state->crtc;
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-
- vmw_connector_state_to_vcs(du->connector.state);
- }
-
-
return ret;
}
@@ -933,6 +931,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -940,9 +939,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
- /* We always want to have an active plane with an active CRTC */
- if (has_primary != new_state->enable)
- return -EINVAL;
+ /*
+ * This is fine in general, but broken userspace might expect
+ * some actual rendering, so give a clue as to why it's blank.
+ */
+ if (new_state->enable && !has_primary)
+ drm_dbg_driver(&vmw->drm,
+ "CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask &&
@@ -965,15 +968,9 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ vmw_vkms_crtc_atomic_begin(crtc, state);
}
-
-void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
-
/**
* vmw_du_crtc_duplicate_state - duplicate crtc state
* @crtc: DRM crtc
@@ -1361,7 +1358,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
- vfbs->base.user_handle = mode_cmd->handles[0];
vfbs->is_bo_proxy = is_bo_proxy;
*out = &vfbs->base;
@@ -1529,7 +1525,6 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
- vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
@@ -2040,6 +2035,29 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
"hotplug_mode_update", 0, 1);
}
+static void
+vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct vmw_private *vmw = vmw_priv(old_state->dev);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ drm_atomic_helper_commit_tail(old_state);
+
+ if (vmw->vkms_enabled) {
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ (void)old_crtc_state;
+ flush_work(&du->vkms.crc_generator_work);
+ }
+ }
+}
+
+static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
+ .atomic_commit_tail = vmw_atomic_commit_tail,
+};
+
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -2059,6 +2077,7 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.max_width = dev_priv->texture_max_width;
dev->mode_config.max_height = dev_priv->texture_max_height;
dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
+ dev->mode_config.helper_private = &vmw_mode_config_helpers;
drm_mode_create_suggested_offset_properties(dev);
vmw_kms_create_hotplug_mode_update_property(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index a94947b588e8..bf24f2f0dcfc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -217,21 +217,11 @@ struct vmw_kms_dirty {
struct vmw_framebuffer {
struct drm_framebuffer base;
bool bo;
- uint32_t user_handle;
-};
-
-/*
- * Clip rectangle
- */
-struct vmw_clip_rect {
- int x1, x2, y1, y2;
};
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
- struct vmw_bo *buffer;
- struct list_head head;
bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
};
@@ -243,10 +233,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
};
static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
@@ -359,7 +349,6 @@ struct vmw_display_unit {
struct vmw_cursor_plane cursor;
struct vmw_surface *cursor_surface;
- struct vmw_bo *cursor_bo;
size_t cursor_age;
int cursor_x;
@@ -387,11 +376,25 @@ struct vmw_display_unit {
bool is_implicit;
int set_gui_x;
int set_gui_y;
-};
-struct vmw_validation_ctx {
- struct vmw_resource *res;
- struct vmw_bo *buf;
+ struct {
+ struct work_struct crc_generator_work;
+ struct hrtimer timer;
+ ktime_t period_ns;
+
+ /* protects concurrent access to the vblank handler */
+ atomic_t atomic_lock;
+ /* protected by @atomic_lock */
+ bool crc_enabled;
+ struct vmw_surface *surface;
+
+ /* protects concurrent access to the crc worker */
+ spinlock_t crc_state_lock;
+ /* protected by @crc_state_lock */
+ bool crc_pending;
+ u64 frame_start;
+ u64 frame_end;
+ } vkms;
};
#define vmw_crtc_to_du(x) \
@@ -403,6 +406,7 @@ struct vmw_validation_ctx {
/*
* Shared display unit functions - vmwgfx_kms.c
*/
+void vmw_du_init(struct vmw_display_unit *du);
void vmw_du_cleanup(struct vmw_display_unit *du);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
@@ -489,8 +493,6 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state);
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state);
-void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state);
void vmw_du_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc);
void vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index c4db4aecca6c..5befc2719a49 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,6 +27,7 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -241,33 +242,6 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
}
-/**
- * vmw_ldu_crtc_atomic_enable - Noop
- *
- * @crtc: CRTC associated with the new screen
- * @state: Unused
- *
- * This is called after a mode set has been completed. Here's
- * usually a good place to call vmw_ldu_add_active/vmw_ldu_del_active
- * but since for LDU the display plane is closely tied to the
- * CRTC, it makes more sense to do those at plane update time.
- */
-static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
-/**
- * vmw_ldu_crtc_atomic_disable - Turns off CRTC
- *
- * @crtc: CRTC to be turned off
- * @state: Unused
- */
-static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
@@ -276,6 +250,9 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .enable_vblank = vmw_vkms_enable_vblank,
+ .disable_vblank = vmw_vkms_disable_vblank,
+ .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp,
};
@@ -418,9 +395,9 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
.mode_set_nofb = vmw_ldu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
- .atomic_flush = vmw_du_crtc_atomic_flush,
- .atomic_enable = vmw_ldu_crtc_atomic_enable,
- .atomic_disable = vmw_ldu_crtc_atomic_disable,
+ .atomic_flush = vmw_vkms_crtc_atomic_flush,
+ .atomic_enable = vmw_vkms_crtc_atomic_enable,
+ .atomic_disable = vmw_vkms_crtc_atomic_disable,
};
@@ -541,6 +518,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
dev_priv->implicit_placement_property,
1);
+ vmw_du_init(&ldu->base);
+
return 0;
err_free_unregister:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 2d72a5ee7c0c..c99cad444991 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
- return ttm_prime_fd_to_handle(tfile, fd, handle);
+ if (ret)
+ ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+ return ret;
}
int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ int ret;
+
+ if (handle > VMWGFX_NUM_MOB)
+ ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ else
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+ return ret;
}
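The fd/handle fallbacks above work because the two handle spaces are disjoint: as the ttm_object_device_init() comment earlier in this diff notes, ttm-prime handles are allocated from VMWGFX_NUM_MOB + 1 upward, while plain GEM handles start at 1. A one-line sketch of the discriminator (hedged; the exact split is a vmwgfx-internal convention):

    /* Handles above VMWGFX_NUM_MOB were handed out by ttm_object, the
     * rest by GEM, so the right export/import path can be picked per handle. */
    bool is_ttm_prime = handle > VMWGFX_NUM_MOB;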
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index ca300c7427d2..848dba09981b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1064,6 +1064,22 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
end << PAGE_SHIFT);
}
+int vmw_resource_clean(struct vmw_resource *res)
+{
+ int ret = 0;
+
+ if (res->res_dirty) {
+ if (!res->func->clean)
+ return -EINVAL;
+
+ ret = res->func->clean(res);
+ if (ret)
+ return ret;
+ res->res_dirty = false;
+ }
+ return ret;
+}
+
/**
* vmw_resources_clean - Clean resources intersecting a mob range
* @vbo: The mob buffer object
@@ -1080,6 +1096,7 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
unsigned long res_start = start << PAGE_SHIFT;
unsigned long res_end = end << PAGE_SHIFT;
unsigned long last_cleaned = 0;
+ int ret;
/*
* Find the resource with lowest backup_offset that intersects the
@@ -1106,18 +1123,9 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
* intersecting the range.
*/
while (found) {
- if (found->res_dirty) {
- int ret;
-
- if (!found->func->clean)
- return -EINVAL;
-
- ret = found->func->clean(found);
- if (ret)
- return ret;
-
- found->res_dirty = false;
- }
+ ret = vmw_resource_clean(found);
+ if (ret)
+ return ret;
last_cleaned = found->guest_memory_offset + found->guest_memory_size;
cur = rb_next(&found->mob_node);
if (!cur)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 30c3ad27b662..df0039a8ef29 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -27,11 +27,13 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
#define vmw_crtc_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.crtc)
@@ -89,7 +91,6 @@ struct vmw_kms_sou_define_gmrfb {
struct vmw_screen_object_unit {
struct vmw_display_unit base;
- unsigned long buffer_size; /**< Size of allocated buffer */
struct vmw_bo *buffer; /**< Backing store buffer */
bool defined;
@@ -240,7 +241,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
int x, y;
sou->buffer = vps->bo;
- sou->buffer_size = vps->bo_size;
conn_state = sou->base.connector.state;
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
@@ -255,7 +255,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
} else {
sou->buffer = NULL;
- sou->buffer_size = 0;
}
}
@@ -271,19 +270,6 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
}
/**
- * vmw_sou_crtc_atomic_enable - Noop
- *
- * @crtc: CRTC associated with the new screen
- * @state: Unused
- *
- * This is called after a mode set has been completed.
- */
-static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
-/**
* vmw_sou_crtc_atomic_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
@@ -305,6 +291,9 @@ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
sou = vmw_crtc_to_sou(crtc);
dev_priv = vmw_priv(crtc->dev);
+ if (dev_priv->vkms_enabled)
+ drm_crtc_vblank_off(crtc);
+
if (sou->defined) {
ret = vmw_sou_fifo_destroy(dev_priv, sou);
if (ret)
@@ -320,6 +309,9 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .enable_vblank = vmw_vkms_enable_vblank,
+ .disable_vblank = vmw_vkms_disable_vblank,
+ .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp,
};
/*
@@ -797,8 +789,8 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
- .atomic_flush = vmw_du_crtc_atomic_flush,
- .atomic_enable = vmw_sou_crtc_atomic_enable,
+ .atomic_flush = vmw_vkms_crtc_atomic_flush,
+ .atomic_enable = vmw_vkms_crtc_atomic_enable,
.atomic_disable = vmw_sou_crtc_atomic_disable,
};
@@ -908,6 +900,9 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
+
+ vmw_du_init(&sou->base);
+
return 0;
err_free_unregister:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 3c8414a13dba..2041c4d48daa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -27,12 +27,14 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
@@ -407,16 +409,6 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
crtc->x, crtc->y);
}
-
-static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
-{
-}
-
-static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -424,7 +416,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
struct vmw_screen_target_display_unit *stdu;
int ret;
-
if (!crtc) {
DRM_ERROR("CRTC is NULL\n");
return;
@@ -433,6 +424,9 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
+ if (dev_priv->vkms_enabled)
+ drm_crtc_vblank_off(crtc);
+
if (stdu->defined) {
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
if (ret)
@@ -770,7 +764,6 @@ out_unref:
return ret;
}
-
/*
* Screen Target CRTC dispatch table
*/
@@ -782,6 +775,12 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .enable_vblank = vmw_vkms_enable_vblank,
+ .disable_vblank = vmw_vkms_disable_vblank,
+ .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp,
+ .get_crc_sources = vmw_vkms_get_crc_sources,
+ .set_crc_source = vmw_vkms_set_crc_source,
+ .verify_crc_source = vmw_vkms_verify_crc_source,
};
@@ -1413,6 +1412,17 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
vmw_fence_obj_unreference(&fence);
}
+static void
+vmw_stdu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
+
+ if (vmw->vkms_enabled)
+ vmw_vkms_set_crc_surface(crtc, stdu->display_srf);
+ vmw_vkms_crtc_atomic_flush(crtc, state);
+}
static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
@@ -1453,12 +1463,11 @@ drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = {
};
static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
- .prepare = vmw_stdu_crtc_helper_prepare,
.mode_set_nofb = vmw_stdu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
- .atomic_flush = vmw_du_crtc_atomic_flush,
- .atomic_enable = vmw_stdu_crtc_atomic_enable,
+ .atomic_flush = vmw_stdu_crtc_atomic_flush,
+ .atomic_enable = vmw_vkms_crtc_atomic_enable,
.atomic_disable = vmw_stdu_crtc_atomic_disable,
};
@@ -1575,6 +1584,9 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
+
+ vmw_du_init(&stdu->base);
+
return 0;
err_free_unregister:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 4d23d0a70bcb..621d98b376bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- vsgt->sgt = &vmw_tt->sgt;
- ret = sg_alloc_table_from_pages_segment(
- &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
- (unsigned long)vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
- if (ret)
- goto out_sg_alloc_fail;
+ if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+ vsgt->sgt = vmw_tt->dma_ttm.sg;
+ } else {
+ vsgt->sgt = &vmw_tt->sgt;
+ ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+ vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev),
+ GFP_KERNEL);
+ if (ret)
+ goto out_sg_alloc_fail;
+ }
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
@@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;
out_map_fail:
- sg_free_table(vmw_tt->vsgt.sgt);
- vmw_tt->vsgt.sgt = NULL;
+ drm_warn(&dev_priv->drm, "VSG table map failed!");
+ sg_free_table(vsgt->sgt);
+ vsgt->sgt = NULL;
out_sg_alloc_fail:
return ret;
}
@@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- int ret;
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
- /* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
- ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (external && ttm->sg)
+ return drm_prime_sg_to_dma_addr_array(ttm->sg,
+ ttm->dma_address,
+ ttm->num_pages);
- return ret;
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (external)
+ return;
vmw_ttm_unbind(bdev, ttm);
@@ -390,6 +402,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct vmw_ttm_tt *vmw_be;
int ret;
+ bool external = bo->type == ttm_bo_type_sg;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
@@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+ if (external)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else
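Taken together, the three ttm hooks above follow the usual TTM treatment of dma-buf imports: flag the tt as external at create time, fill only DMA addresses at populate time, and never free pages the exporter owns. A condensed sketch (the fragments mirror the hunks above, not a complete driver):

    /* tt_create: sg-backed BOs get external flags and an sg-capable tt */
    if (bo->type == ttm_bo_type_sg)
    	page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

    /* populate: no pool pages for imports, just DMA addresses from the sgt */
    if ((ttm->page_flags & TTM_TT_FLAG_EXTERNAL) && ttm->sg)
    	return drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
    					      ttm->num_pages);

    /* unpopulate: imported pages belong to the exporter, so bail early */
    if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
    	return;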
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
deleted file mode 100644
index 90097d04b45f..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "vmwgfx_drv.h"
-
-static int vmw_bo_vm_lookup(struct ttm_device *bdev,
- struct drm_file *filp,
- unsigned long offset,
- unsigned long pages,
- struct ttm_buffer_object **p_bo)
-{
- struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
- struct drm_device *drm = &dev_priv->drm;
- struct drm_vma_offset_node *node;
- int ret;
-
- *p_bo = NULL;
-
- drm_vma_offset_lock_lookup(bdev->vma_manager);
-
- node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
- if (likely(node)) {
- *p_bo = container_of(node, struct ttm_buffer_object,
- base.vma_node);
- *p_bo = ttm_bo_get_unless_zero(*p_bo);
- }
-
- drm_vma_offset_unlock_lookup(bdev->vma_manager);
-
- if (!*p_bo) {
- drm_err(drm, "Could not find buffer object to map\n");
- return -EINVAL;
- }
-
- if (!drm_vma_node_is_allowed(node, filp)) {
- ret = -EACCES;
- goto out_no_access;
- }
-
- return 0;
-out_no_access:
- ttm_bo_put(*p_bo);
- return ret;
-}
-
-int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- static const struct vm_operations_struct vmw_vm_ops = {
- .pfn_mkwrite = vmw_bo_vm_mkwrite,
- .page_mkwrite = vmw_bo_vm_mkwrite,
- .fault = vmw_bo_vm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close,
- };
- struct drm_file *file_priv = filp->private_data;
- struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- struct ttm_device *bdev = &dev_priv->bdev;
- struct ttm_buffer_object *bo;
- int ret;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
- return -EINVAL;
-
- ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_mmap_obj(vma, bo);
- if (unlikely(ret != 0))
- goto out_unref;
-
- vma->vm_ops = &vmw_vm_ops;
-
- /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
- if (!is_cow_mapping(vma->vm_flags))
- vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);
-
- ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
-
- return 0;
-
-out_unref:
- ttm_bo_put(bo);
- return ret;
-}
-
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index aaacbdcbd742..e7625b3f71e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -32,9 +32,6 @@
#include <linux/slab.h>
-
-#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
-
/**
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
@@ -112,20 +109,10 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
- struct page *page;
-
- if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
- ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
- ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
- }
-
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
- if (ctx->vm)
- ctx->vm_size_left -= PAGE_SIZE;
-
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
@@ -155,10 +142,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
- if (ctx->vm && ctx->total_mem) {
- ctx->total_mem = 0;
- ctx->vm_size_left = 0;
- }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 240ee0c4ebfd..353d837907d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -52,10 +52,6 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
- * @vm: A pointer to the memory reservation interface or NULL if no
- * memory reservation is needed.
- * @vm_size_left: Amount of reserved memory that so far has not been allocated.
- * @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
struct vmw_sw_context *sw_context;
@@ -68,9 +64,6 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
- struct vmw_validation_mem *vm;
- size_t vm_size_left;
- size_t total_mem;
};
struct vmw_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
new file mode 100644
index 000000000000..7e93a45948f7
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_vkms.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
+
+#include "vmw_surface_cache.h"
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs_crc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+
+#define GUESTINFO_VBLANK "guestinfo.vmwgfx.vkms_enable"
+
+static int
+vmw_surface_sync(struct vmw_private *vmw,
+ struct vmw_surface *surf)
+{
+ int ret;
+ struct vmw_fence_obj *fence = NULL;
+ struct vmw_bo *bo = surf->res.guest_memory_bo;
+
+ vmw_resource_clean(&surf->res);
+
+ ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ if (ret != 0) {
+ drm_warn(&vmw->drm, "%s: failed reserve\n", __func__);
+ goto done;
+ }
+
+ ret = vmw_execbuf_fence_commands(NULL, vmw, &fence, NULL);
+ if (ret != 0) {
+ drm_warn(&vmw->drm, "%s: failed execbuf\n", __func__);
+ ttm_bo_unreserve(&bo->tbo);
+ goto done;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&bo->tbo);
+done:
+ return ret;
+}
+
+static int
+compute_crc(struct drm_crtc *crtc,
+ struct vmw_surface *surf,
+ u32 *crc)
+{
+ u8 *mapped_surface;
+ struct vmw_bo *bo = surf->res.guest_memory_bo;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(surf->metadata.format);
+ u32 row_pitch_bytes;
+ SVGA3dSize blocks;
+ u32 y;
+
+ *crc = 0;
+
+ vmw_surface_get_size_in_blocks(desc, &surf->metadata.base_size, &blocks);
+ row_pitch_bytes = blocks.width * desc->pitchBytesPerBlock;
+ WARN_ON(!bo);
+ mapped_surface = vmw_bo_map_and_cache(bo);
+
+ for (y = 0; y < blocks.height; y++) {
+ *crc = crc32_le(*crc, mapped_surface, row_pitch_bytes);
+ mapped_surface += row_pitch_bytes;
+ }
+
+ vmw_bo_unmap(bo);
+
+ return 0;
+}
+
+static void
+crc_generate_worker(struct work_struct *work)
+{
+ struct vmw_display_unit *du =
+ container_of(work, struct vmw_display_unit, vkms.crc_generator_work);
+ struct drm_crtc *crtc = &du->crtc;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ bool crc_pending;
+ u64 frame_start, frame_end;
+ u32 crc32 = 0;
+ struct vmw_surface *surf = NULL;
+ int ret;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ crc_pending = du->vkms.crc_pending;
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+ /*
+ * We raced with the vblank hrtimer and previous work already computed
+ * the crc, nothing to do.
+ */
+ if (!crc_pending)
+ return;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ surf = du->vkms.surface;
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+ if (vmw_surface_sync(vmw, surf)) {
+ drm_warn(crtc->dev, "CRC worker wasn't able to sync the crc surface!\n");
+ return;
+ }
+
+ ret = compute_crc(crtc, surf, &crc32);
+ if (ret)
+ return;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ frame_start = du->vkms.frame_start;
+ frame_end = du->vkms.frame_end;
+ crc_pending = du->vkms.crc_pending;
+ du->vkms.frame_start = 0;
+ du->vkms.frame_end = 0;
+ du->vkms.crc_pending = false;
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+ /*
+ * The worker can fall behind the vblank hrtimer, make sure we catch up.
+ */
+ while (frame_start <= frame_end)
+ drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
+}
+
+static enum hrtimer_restart
+vmw_vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
+ struct drm_crtc *crtc = &du->crtc;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ struct vmw_surface *surf = NULL;
+ u64 ret_overrun;
+ bool locked, ret;
+
+ ret_overrun = hrtimer_forward_now(&du->vkms.timer,
+ du->vkms.period_ns);
+ if (ret_overrun != 1)
+ drm_dbg_driver(crtc->dev, "vblank timer missed %lld frames.\n",
+ ret_overrun - 1);
+
+ locked = vmw_vkms_vblank_trylock(crtc);
+ ret = drm_crtc_handle_vblank(crtc);
+ WARN_ON(!ret);
+ if (!locked)
+ return HRTIMER_RESTART;
+ surf = du->vkms.surface;
+ vmw_vkms_unlock(crtc);
+
+ if (du->vkms.crc_enabled && surf) {
+ u64 frame = drm_crtc_accurate_vblank_count(crtc);
+
+ spin_lock(&du->vkms.crc_state_lock);
+ if (!du->vkms.crc_pending)
+ du->vkms.frame_start = frame;
+ else
+ drm_dbg_driver(crtc->dev,
+ "crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
+ du->vkms.frame_start, frame);
+ du->vkms.frame_end = frame;
+ du->vkms.crc_pending = true;
+ spin_unlock(&du->vkms.crc_state_lock);
+
+ ret = queue_work(vmw->crc_workq, &du->vkms.crc_generator_work);
+ if (!ret)
+ drm_dbg_driver(crtc->dev, "Composer worker already queued\n");
+ }
+
+ return HRTIMER_RESTART;
+}
+
+void
+vmw_vkms_init(struct vmw_private *vmw)
+{
+ char buffer[64];
+ const size_t max_buf_len = sizeof(buffer) - 1;
+ size_t buf_len = max_buf_len;
+ int ret;
+
+ vmw->vkms_enabled = false;
+
+ ret = vmw_host_get_guestinfo(GUESTINFO_VBLANK, buffer, &buf_len);
+ if (ret || buf_len > max_buf_len)
+ return;
+ buffer[buf_len] = '\0';
+
+ ret = kstrtobool(buffer, &vmw->vkms_enabled);
+ if (!ret && vmw->vkms_enabled) {
+ ret = drm_vblank_init(&vmw->drm, VMWGFX_NUM_DISPLAY_UNITS);
+ vmw->vkms_enabled = (ret == 0);
+ }
+
+ vmw->crc_workq = alloc_ordered_workqueue("vmwgfx_crc_generator", 0);
+ if (!vmw->crc_workq) {
+ drm_warn(&vmw->drm, "crc workqueue allocation failed. Disabling vkms.");
+ vmw->vkms_enabled = false;
+ }
+ if (vmw->vkms_enabled)
+ drm_info(&vmw->drm, "VKMS enabled\n");
+}
+
+void
+vmw_vkms_cleanup(struct vmw_private *vmw)
+{
+ destroy_workqueue(vmw->crc_workq);
+}
+
+bool
+vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vmw_private *vmw = vmw_priv(dev);
+ unsigned int pipe = crtc->index;
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (!vmw->vkms_enabled)
+ return false;
+
+ if (!READ_ONCE(vblank->enabled)) {
+ *vblank_time = ktime_get();
+ return true;
+ }
+
+ *vblank_time = READ_ONCE(du->vkms.timer.node.expires);
+
+ if (WARN_ON(*vblank_time == vblank->time))
+ return true;
+
+ /*
+ * To prevent races we roll the hrtimer forward before we do any
+ * interrupt processing - this is how real hw works (the interrupt is
+ * only generated after all the vblank registers are updated) and what
+ * the vblank core expects. Therefore we need to always correct the
+ * timestamp by one frame.
+ */
+ *vblank_time -= du->vkms.period_ns;
+
+ return true;
+}
+
+int
+vmw_vkms_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vmw_private *vmw = vmw_priv(dev);
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ if (!vmw->vkms_enabled)
+ return -EINVAL;
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ du->vkms.timer.function = &vmw_vkms_vblank_simulate;
+ du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
+ hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);
+
+ return 0;
+}
+
+void
+vmw_vkms_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (!vmw->vkms_enabled)
+ return;
+
+ hrtimer_cancel(&du->vkms.timer);
+ du->vkms.surface = NULL;
+ du->vkms.period_ns = ktime_set(0, 0);
+}
+
+enum vmw_vkms_lock_state {
+ VMW_VKMS_LOCK_UNLOCKED = 0,
+ VMW_VKMS_LOCK_MODESET = 1,
+ VMW_VKMS_LOCK_VBLANK = 2
+};
+
+void
+vmw_vkms_crtc_init(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
+ spin_lock_init(&du->vkms.crc_state_lock);
+
+ INIT_WORK(&du->vkms.crc_generator_work, crc_generate_worker);
+ du->vkms.surface = NULL;
+}
+
+void
+vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ WARN_ON(work_pending(&du->vkms.crc_generator_work));
+ hrtimer_cancel(&du->vkms.timer);
+}
+
+void
+vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled)
+ vmw_vkms_modeset_lock(crtc);
+}
+
+void
+vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ unsigned long flags;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (!vmw->vkms_enabled)
+ return;
+
+ if (crtc->state->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ crtc->state->event = NULL;
+ }
+
+ vmw_vkms_unlock(crtc);
+}
+
+void
+vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled)
+ drm_crtc_vblank_on(crtc);
+}
+
+void
+vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled)
+ drm_crtc_vblank_off(crtc);
+}
+
+static bool
+is_crc_supported(struct drm_crtc *crtc)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (!vmw->vkms_enabled)
+ return false;
+
+ if (vmw->active_display_unit != vmw_du_screen_target)
+ return false;
+
+ return true;
+}
+
+static const char * const pipe_crc_sources[] = {"auto"};
+
+static int
+crc_parse_source(const char *src_name,
+ bool *enabled)
+{
+ int ret = 0;
+
+ if (!src_name) {
+ *enabled = false;
+ } else if (strcmp(src_name, "auto") == 0) {
+ *enabled = true;
+ } else {
+ *enabled = false;
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+const char *const *
+vmw_vkms_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = 0;
+ if (!is_crc_supported(crtc))
+ return NULL;
+
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
+
+int
+vmw_vkms_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt)
+{
+ bool enabled;
+
+ if (!is_crc_supported(crtc))
+ return -EINVAL;
+
+ if (crc_parse_source(src_name, &enabled) < 0) {
+ drm_dbg_driver(crtc->dev, "unknown source '%s'\n", src_name);
+ return -EINVAL;
+ }
+
+ *values_cnt = 1;
+
+ return 0;
+}
+
+int
+vmw_vkms_set_crc_source(struct drm_crtc *crtc,
+ const char *src_name)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ bool enabled, prev_enabled, locked;
+ int ret;
+
+ if (!is_crc_supported(crtc))
+ return -EINVAL;
+
+ ret = crc_parse_source(src_name, &enabled);
+
+ if (enabled)
+ drm_crtc_vblank_get(crtc);
+
+ locked = vmw_vkms_modeset_lock_relaxed(crtc);
+ prev_enabled = du->vkms.crc_enabled;
+ du->vkms.crc_enabled = enabled;
+ if (locked)
+ vmw_vkms_unlock(crtc);
+
+ if (prev_enabled)
+ drm_crtc_vblank_put(crtc);
+
+ return ret;
+}
+
+void
+vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
+ struct vmw_surface *surf)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled) {
+ WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
+ du->vkms.surface = surf;
+ }
+}
+
+/**
+ * vmw_vkms_lock_max_wait_ns - Return the max wait for the vkms lock
+ * @du: The vmw_display_unit from which to grab the vblank timings
+ *
+ * Returns the maximum wait time used to acquire the vkms lock. By
+ * default it uses the duration of a single frame, falling back to
+ * 1/60th of a second when vblank was not initialized for the display unit.
+ */
+static inline u64
+vmw_vkms_lock_max_wait_ns(struct vmw_display_unit *du)
+{
+ s64 nsecs = ktime_to_ns(du->vkms.period_ns);
+
+ return (nsecs > 0) ? nsecs : 16666666;
+}
+
+/**
+ * vmw_vkms_modeset_lock - Protects access to crtc during modeset
+ * @crtc: The crtc to lock for vkms
+ *
+ * This function prevents the VKMS timers/callbacks from being called
+ * while a modeset operation is in process. We don't want the callbacks
+ * e.g. the vblank simulator to be trying to access incomplete state
+ * so we need to make sure they execute only when the modeset has
+ * finished.
+ *
+ * Normally this would have been done with a spinlock but locking the
+ * entire atomic modeset with vmwgfx is impossible because kms prepare
+ * executes non-atomic ops (e.g. vmw_validation_prepare holds a mutex to
+ * guard various bits of state), which means that we need to synchronize
+ * atomic context (the vblank handler) with the non-atomic entirety
+ * of kms - so use an atomic_t to track which part of vkms has access
+ * to the basic vkms state.
+ */
+void
+vmw_vkms_modeset_lock(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ const u64 nsecs_delay = 10;
+ const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
+ u64 total_delay = 0;
+ int ret;
+
+ do {
+ ret = atomic_cmpxchg(&du->vkms.atomic_lock,
+ VMW_VKMS_LOCK_UNLOCKED,
+ VMW_VKMS_LOCK_MODESET);
+ if (ret == VMW_VKMS_LOCK_UNLOCKED || total_delay >= MAX_NSECS_DELAY)
+ break;
+ ndelay(nsecs_delay);
+ total_delay += nsecs_delay;
+ } while (1);
+
+ if (total_delay >= MAX_NSECS_DELAY) {
+ drm_warn(crtc->dev, "VKMS lock expired! total_delay = %lld, ret = %d, cur = %d\n",
+ total_delay, ret, atomic_read(&du->vkms.atomic_lock));
+ }
+}
+
+/**
+ * vmw_vkms_modeset_lock_relaxed - Protects access to crtc during modeset
+ * @crtc: The crtc to lock for vkms
+ *
+ * Much like vmw_vkms_modeset_lock except that when the crtc is currently
+ * in a modeset it will return immediately.
+ *
+ * Returns true if actually locked vkms to modeset or false otherwise.
+ */
+bool
+vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ const u64 nsecs_delay = 10;
+ const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
+ u64 total_delay = 0;
+ int ret;
+
+ do {
+ ret = atomic_cmpxchg(&du->vkms.atomic_lock,
+ VMW_VKMS_LOCK_UNLOCKED,
+ VMW_VKMS_LOCK_MODESET);
+ if (ret == VMW_VKMS_LOCK_UNLOCKED ||
+ ret == VMW_VKMS_LOCK_MODESET ||
+ total_delay >= MAX_NSECS_DELAY)
+ break;
+ ndelay(nsecs_delay);
+ total_delay += nsecs_delay;
+ } while (1);
+
+ if (total_delay >= MAX_NSECS_DELAY) {
+ drm_warn(crtc->dev, "VKMS relaxed lock expired!\n");
+ return false;
+ }
+
+ return ret == VMW_VKMS_LOCK_UNLOCKED;
+}
+
+/**
+ * vmw_vkms_vblank_trylock - Protects access to crtc during vblank
+ * @crtc: The crtc to lock for vkms
+ *
+ * Tries to lock vkms for vblank and returns immediately.
+ *
+ * Returns true if it locked vkms for vblank, false otherwise.
+ */
+bool
+vmw_vkms_vblank_trylock(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ u32 ret;
+
+ ret = atomic_cmpxchg(&du->vkms.atomic_lock,
+ VMW_VKMS_LOCK_UNLOCKED,
+ VMW_VKMS_LOCK_VBLANK);
+
+ return ret == VMW_VKMS_LOCK_UNLOCKED;
+}
+
+void
+vmw_vkms_unlock(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ /* Release flag; mark it as unlocked. */
+ atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
+}
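
The tri-state lock implemented above lends itself to a standalone illustration. The following is a minimal C11 sketch of the same cmpxchg pattern, using <stdatomic.h> in place of the kernel's atomic_t and hypothetical names; it is not vmwgfx code. The vblank side gives up immediately, while the modeset side retries with a bounded budget (the driver additionally ndelay()s between attempts).

/* Minimal userspace sketch of the vkms tri-state lock (assumed names). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { LOCK_UNLOCKED, LOCK_MODESET, LOCK_VBLANK };

static atomic_int vkms_lock = LOCK_UNLOCKED;

/* Vblank side: never blocks, mirrors vmw_vkms_vblank_trylock(). */
static bool vblank_trylock(void)
{
	int expected = LOCK_UNLOCKED;

	return atomic_compare_exchange_strong(&vkms_lock, &expected,
					      LOCK_VBLANK);
}

/* Modeset side: bounded retry, mirrors vmw_vkms_modeset_lock(). */
static bool modeset_lock(unsigned long max_tries)
{
	while (max_tries--) {
		int expected = LOCK_UNLOCKED;

		if (atomic_compare_exchange_strong(&vkms_lock, &expected,
						   LOCK_MODESET))
			return true;
		/* The driver ndelay()s here; the sketch just retries. */
	}
	return false;
}

static void vkms_unlock(void)
{
	atomic_store(&vkms_lock, LOCK_UNLOCKED);
}

int main(void)
{
	if (modeset_lock(1000)) {
		/* Vblank must lose while a modeset holds the lock. */
		printf("trylock under modeset: %d\n", vblank_trylock());
		vkms_unlock();
	}
	printf("trylock when free: %d\n", vblank_trylock());
	return 0;
}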
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h
new file mode 100644
index 000000000000..69ddd33a8444
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_VKMS_H_
+#define VMWGFX_VKMS_H_
+
+#include <linux/hrtimer_types.h>
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_crtc;
+struct vmw_private;
+struct vmw_surface;
+
+void vmw_vkms_init(struct vmw_private *vmw);
+void vmw_vkms_cleanup(struct vmw_private *vmw);
+
+void vmw_vkms_modeset_lock(struct drm_crtc *crtc);
+bool vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc);
+bool vmw_vkms_vblank_trylock(struct drm_crtc *crtc);
+void vmw_vkms_unlock(struct drm_crtc *crtc);
+
+bool vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
+int vmw_vkms_enable_vblank(struct drm_crtc *crtc);
+void vmw_vkms_disable_vblank(struct drm_crtc *crtc);
+
+void vmw_vkms_crtc_init(struct drm_crtc *crtc);
+void vmw_vkms_crtc_cleanup(struct drm_crtc *crtc);
+void vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+
+const char *const *vmw_vkms_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+int vmw_vkms_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt);
+int vmw_vkms_set_crc_source(struct drm_crtc *crtc,
+ const char *src_name);
+void vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
+ struct vmw_surface *surf);
+
+#endif
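
For orientation, here is a hedged sketch of how the vblank and CRC hooks declared in this header would typically be wired into a drm_crtc_funcs table; the field names come from include/drm/drm_crtc.h, but the table itself is illustrative and not part of this patch.

#include <drm/drm_crtc.h>
#include "vmwgfx_vkms.h"

/* Illustrative hookup only; the real table lives in the display unit code. */
static const struct drm_crtc_funcs vmw_vkms_crtc_funcs_sketch = {
	.enable_vblank		= vmw_vkms_enable_vblank,
	.disable_vblank		= vmw_vkms_disable_vblank,
	.get_vblank_timestamp	= vmw_vkms_get_vblank_timestamp,
	.get_crc_sources	= vmw_vkms_get_crc_sources,
	.verify_crc_source	= vmw_vkms_verify_crc_source,
	.set_crc_source		= vmw_vkms_set_crc_source,
};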
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 1a556d087e63..782934be0a77 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,7 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
tristate "Intel Xe Graphics"
- depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
+ depends on (m || (y && KUNIT=y))
+ depends on DRM
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HDCP_HELPER
+ depends on DRM_DISPLAY_HDMI_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on MMU
+ depends on PCI
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -13,10 +20,6 @@ config DRM_XE
select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
select DRM_PANEL
select DRM_SUBALLOC_HELPER
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDCP_HELPER
- select DRM_DISPLAY_HDMI_HELPER
- select DRM_DISPLAY_HELPER
select DRM_MIPI_DSI
select RELAY
select IRQ_WORK
@@ -26,6 +29,7 @@ config DRM_XE
select INPUT if ACPI
select ACPI_VIDEO if X86 && ACPI
select ACPI_BUTTON if ACPI
+ select X86_PLATFORM_DEVICES if X86 && ACPI
select ACPI_WMI if X86 && ACPI
select SYNC_FILE
select IOSF_MBI
@@ -41,6 +45,7 @@ config DRM_XE
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
select AUXILIARY_BUS
+ select HMM_MIRROR
help
Experimental driver for Intel Xe series GPUs
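
Because the display helpers are now dependencies rather than selections, they must be enabled elsewhere in the configuration before DRM_XE becomes available. A hedged .config fragment, with the module/built-in choices purely illustrative:

CONFIG_DRM=m
CONFIG_DRM_DISPLAY_HELPER=m
CONFIG_DRM_DISPLAY_DP_HELPER=y
CONFIG_DRM_DISPLAY_HDCP_HELPER=y
CONFIG_DRM_DISPLAY_HDMI_HELPER=y
CONFIG_DRM_XE=m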
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 5a428ca00f10..8321ec4f9b46 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -49,6 +49,7 @@ $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
uses_generated_oob := \
$(obj)/xe_gsc.o \
$(obj)/xe_guc.o \
+ $(obj)/xe_guc_ads.o \
$(obj)/xe_migrate.o \
$(obj)/xe_ring_ops.o \
$(obj)/xe_vm.o \
@@ -97,6 +98,8 @@ xe-y += xe_bb.o \
xe_guc_db_mgr.o \
xe_guc_debugfs.o \
xe_guc_hwconfig.o \
+ xe_guc_id_mgr.o \
+ xe_guc_klv_helpers.o \
xe_guc_log.o \
xe_guc_pc.o \
xe_guc_submit.o \
@@ -145,6 +148,8 @@ xe-y += xe_bb.o \
xe_wa.o \
xe_wopcm.o
+xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
+
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
@@ -155,9 +160,14 @@ xe-y += \
xe_sriov.o
xe-$(CONFIG_PCI_IOV) += \
+ xe_gt_sriov_pf.o \
+ xe_gt_sriov_pf_config.o \
+ xe_gt_sriov_pf_control.o \
+ xe_gt_sriov_pf_policy.o \
xe_lmtt.o \
xe_lmtt_2l.o \
- xe_lmtt_ml.o
+ xe_lmtt_ml.o \
+ xe_sriov_pf.o
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
@@ -172,9 +182,6 @@ subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
-Ddrm_i915_gem_object=xe_bo \
-Ddrm_i915_private=xe_device
-CFLAGS_i915-display/intel_fbdev.o = $(call cc-disable-warning, override-init)
-CFLAGS_i915-display/intel_display_device.o = $(call cc-disable-warning, override-init)
-
# Rule to build SOC code shared with i915
$(obj)/i915-soc/%.o: $(srctree)/drivers/gpu/drm/i915/soc/%.c FORCE
$(call cmd,force_checksrc)
@@ -257,6 +264,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_global_state.o \
i915-display/intel_gmbus.o \
i915-display/intel_hdcp.o \
+ i915-display/intel_hdcp_gsc_message.o \
i915-display/intel_hdmi.o \
i915-display/intel_hotplug.o \
i915-display/intel_hotplug_irq.o \
@@ -278,6 +286,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_vdsc.o \
i915-display/intel_vga.o \
i915-display/intel_vrr.o \
+ i915-display/intel_dmc_wl.o \
i915-display/intel_wm.o \
i915-display/skl_scaler.o \
i915-display/skl_universal_plane.o \
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
index 5496a5890847..c1ad09b36453 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
@@ -3,8 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#ifndef _GUC_ACTIONS_PF_ABI_H
-#define _GUC_ACTIONS_PF_ABI_H
+#ifndef _ABI_GUC_ACTIONS_SRIOV_ABI_H
+#define _ABI_GUC_ACTIONS_SRIOV_ABI_H
#include "guc_communication_ctb_abi.h"
@@ -171,4 +171,200 @@
#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn
#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+/**
+ * DOC: GUC2PF_VF_STATE_NOTIFY
+ *
+ * The GUC2PF_VF_STATE_NOTIFY message is used by the GuC to notify the PF
+ * about a change of the VF state.
+ *
+ * This G2H message is sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_GUC2PF_VF_STATE_NOTIFY` = 0x5106 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | DATA1 = **VFID** - VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | DATA2 = **EVENT** - notification event: |
+ * | | | |
+ * | | | - _`GUC_PF_NOTIFY_VF_ENABLE` = 1 (only if VFID = 0) |
+ * | | | - _`GUC_PF_NOTIFY_VF_FLR` = 1 |
+ * | | | - _`GUC_PF_NOTIFY_VF_FLR_DONE` = 2 |
+ * | | | - _`GUC_PF_NOTIFY_VF_PAUSE_DONE` = 3 |
+ * | | | - _`GUC_PF_NOTIFY_VF_FIXUP_DONE` = 4 |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_GUC2PF_VF_STATE_NOTIFY 0x5106u
+
+#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u)
+#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC_PF_NOTIFY_VF_ENABLE 1u
+#define GUC_PF_NOTIFY_VF_FLR 1u
+#define GUC_PF_NOTIFY_VF_FLR_DONE 2u
+#define GUC_PF_NOTIFY_VF_PAUSE_DONE 3u
+#define GUC_PF_NOTIFY_VF_FIXUP_DONE 4u
+
+/**
+ * DOC: PF2GUC_UPDATE_VGT_POLICY
+ *
+ * This message is used by the PF to set `GuC VGT Policy KLVs`_.
+ *
+ * This message must be sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY` = 0x5502 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **CFG_ADDR_LO** - dword aligned GGTT offset that |
+ * | | | represents the start of `GuC VGT Policy KLVs`_ list. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **CFG_ADDR_HI** - upper 32 bits of above offset. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 | 31:0 | **CFG_SIZE** - size (in dwords) of the config buffer |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **COUNT** - number of KLVs successfully applied |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY 0x5502u
+
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_1_CFG_ADDR_LO GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_2_CFG_ADDR_HI GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_3_CFG_SIZE GUC_HXG_REQUEST_MSG_n_DATAn
+
+#define PF2GUC_UPDATE_VGT_POLICY_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define PF2GUC_UPDATE_VGT_POLICY_RESPONSE_MSG_0_COUNT GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/**
+ * DOC: PF2GUC_UPDATE_VF_CFG
+ *
+ * The `PF2GUC_UPDATE_VF_CFG`_ message is used by the PF to provision a single VF in the GuC.
+ *
+ * This message must be sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_UPDATE_VF_CFG` = 0x5503 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VFID** - identifier of the VF that the KLV |
+ * | | | configurations are being applied to |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **CFG_ADDR_LO** - dword aligned GGTT offset that represents |
+ * | | | the start of a list of virtualization related KLV configs |
+ * | | | that are to be applied to the VF. |
+ * | | | If this parameter is zero, the list is not parsed. |
+ * | | | If both the configs address parameter and CFG_SIZE are zero,|
+ * | | | the associated VF config shall be reset to its default state|
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 | 31:0 | **CFG_ADDR_HI** - upper 32 bits of configs address. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 4 | 31:0 | **CFG_SIZE** - size (in dwords) of the config buffer |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **COUNT** - number of KLVs successfully applied |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_UPDATE_VF_CFG 0x5503u
+
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 4u)
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_1_VFID GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_2_CFG_ADDR_LO GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_3_CFG_ADDR_HI GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_4_CFG_SIZE GUC_HXG_REQUEST_MSG_n_DATAn
+
+#define PF2GUC_UPDATE_VF_CFG_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define PF2GUC_UPDATE_VF_CFG_RESPONSE_MSG_0_COUNT GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/**
+ * DOC: PF2GUC_VF_CONTROL
+ *
+ * The PF2GUC_VF_CONTROL message is used by the PF to trigger a change of
+ * the VF state maintained by the GuC.
+ *
+ * This H2G message must be sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_VF_CONTROL_CMD` = 0x5506 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | DATA1 = **VFID** - VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | DATA2 = **COMMAND** - control command: |
+ * | | | |
+ * | | | - _`GUC_PF_TRIGGER_VF_PAUSE` = 1 |
+ * | | | - _`GUC_PF_TRIGGER_VF_RESUME` = 2 |
+ * | | | - _`GUC_PF_TRIGGER_VF_STOP` = 3 |
+ * | | | - _`GUC_PF_TRIGGER_VF_FLR_START` = 4 |
+ * | | | - _`GUC_PF_TRIGGER_VF_FLR_FINISH` = 5 |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_VF_CONTROL 0x5506u
+
+#define PF2GUC_VF_CONTROL_REQUEST_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u)
+#define PF2GUC_VF_CONTROL_REQUEST_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0
+#define PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC_PF_TRIGGER_VF_PAUSE 1u
+#define GUC_PF_TRIGGER_VF_RESUME 2u
+#define GUC_PF_TRIGGER_VF_STOP 3u
+#define GUC_PF_TRIGGER_VF_FLR_START 4u
+#define GUC_PF_TRIGGER_VF_FLR_FINISH 5u
+
#endif
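
As a usage illustration, a PF2GUC_VF_CONTROL request can be packed from the definitions above together with the generic HXG field macros from guc_messages_abi.h. This is a hedged sketch of the body of a sending function, with vfid as a placeholder variable:

	u32 request[PF2GUC_VF_CONTROL_REQUEST_MSG_LEN] = {
		/* dw0: origin, type and action, per the table above */
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_PF2GUC_VF_CONTROL),
		/* dw1: target VF, dw2: control command */
		FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID, vfid),
		FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND,
			   GUC_PF_TRIGGER_VF_PAUSE),
	};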
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 0400bc0fccdc..511cf974d585 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -319,4 +319,14 @@ enum {
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY 0x8a0b
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_LEN 1u
+/*
+ * Workaround keys:
+ */
+enum xe_guc_klv_ids {
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED = 0x9002,
+ GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING = 0x9005,
+ GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
+ GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
+};
+
#endif
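
A simple workaround KLV such as those listed above typically carries only a key and a zero dword-length, so enabling one amounts to emitting a single header dword. A hedged sketch using the generic GUC_KLV_0_KEY/GUC_KLV_0_LEN fields defined earlier in this header:

	/* Illustrative only: a value-less workaround KLV header dword. */
	u32 wa_klv = FIELD_PREP(GUC_KLV_0_KEY,
				GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING) |
		     FIELD_PREP(GUC_KLV_0_LEN, 0);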
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 420eba0e4be0..ffaa4d2f1eed 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -19,14 +19,12 @@
#include "xe_bo.h"
#include "xe_pm.h"
#include "xe_step.h"
-#include "i915_gem.h"
#include "i915_gem_stolen.h"
#include "i915_gpu_error.h"
#include "i915_reg_defs.h"
#include "i915_utils.h"
#include "intel_gt_types.h"
#include "intel_step.h"
-#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "intel_runtime_pm.h"
#include <linux/pm_runtime.h>
@@ -41,12 +39,8 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
return dev_get_drvdata(kdev);
}
-
-#define INTEL_JASPERLAKE 0
-#define INTEL_ELKHARTLAKE 0
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
-#define INTEL_DEVID(dev_priv) ((dev_priv)->info.devid)
#define IS_I830(dev_priv) (dev_priv && 0)
#define IS_I845G(dev_priv) (dev_priv && 0)
#define IS_I85X(dev_priv) (dev_priv && 0)
@@ -85,11 +79,10 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P)
-#define IS_XEHPSDV(dev_priv) (dev_priv && 0)
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
-#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC)
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
+#define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE)
#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
@@ -97,19 +90,12 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IP_VER(ver, rel) ((ver) << 8 | (rel))
-#define INTEL_DISPLAY_ENABLED(xe) (HAS_DISPLAY((xe)) && !intel_opregion_headless_sku((xe)))
-
-#define IS_GRAPHICS_VER(xe, first, last) \
- ((xe)->info.graphics_verx100 >= first * 100 && \
- (xe)->info.graphics_verx100 <= (last*100 + 99))
#define IS_MOBILE(xe) (xe && 0)
-#define HAS_LLC(xe) (!IS_DGFX((xe)))
#define HAS_GMD_ID(xe) GRAPHICS_VERx100(xe) >= 1270
/* Workarounds not handled yet */
#define IS_DISPLAY_STEP(xe, first, last) ({u8 __step = (xe)->info.step.display; first <= __step && __step <= last; })
-#define IS_GRAPHICS_STEP(xe, first, last) ({u8 __step = (xe)->info.step.graphics; first <= __step && __step <= last; })
#define IS_LP(xe) (0)
#define IS_GEN9_LP(xe) (0)
@@ -126,27 +112,6 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_KABYLAKE_ULT(xe) (xe && 0)
#define IS_SKYLAKE_ULT(xe) (xe && 0)
-#define IS_DG1_GRAPHICS_STEP(xe, first, last) (IS_DG1(xe) && IS_GRAPHICS_STEP(xe, first, last))
-#define IS_DG2_GRAPHICS_STEP(xe, variant, first, last) \
- ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_ ## variant && \
- IS_GRAPHICS_STEP(xe, first, last))
-#define IS_XEHPSDV_GRAPHICS_STEP(xe, first, last) (IS_XEHPSDV(xe) && IS_GRAPHICS_STEP(xe, first, last))
-
-/* XXX: No basedie stepping support yet */
-#define IS_PVC_BD_STEP(xe, first, last) (!WARN_ON(1) && IS_PONTEVECCHIO(xe))
-
-#define IS_TIGERLAKE_DISPLAY_STEP(xe, first, last) (IS_TIGERLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_ROCKETLAKE_DISPLAY_STEP(xe, first, last) (IS_ROCKETLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_DG1_DISPLAY_STEP(xe, first, last) (IS_DG1(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_DG2_DISPLAY_STEP(xe, first, last) (IS_DG2(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_ADLP_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_P(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_ADLS_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_S(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_JSL_EHL_DISPLAY_STEP(xe, first, last) (IS_JSL_EHL(xe) && IS_DISPLAY_STEP(xe, first, last))
-#define IS_MTL_DISPLAY_STEP(xe, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
-
-/* FIXME: Add subplatform here */
-#define IS_MTL_GRAPHICS_STEP(xe, sub, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
-
#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
@@ -154,30 +119,31 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
#define to_intel_bo(x) gem_to_xe_bo((x))
-#define mkwrite_device_info(xe) (INTEL_INFO(xe))
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-#define intel_has_gpu_reset(a) (a && 0)
-
#include "intel_wakeref.h"
static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
- if (xe_pm_runtime_get(xe) < 0) {
- xe_pm_runtime_put(xe);
- return 0;
- }
- return 1;
+ return xe_pm_runtime_resume_and_get(xe);
}
static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
- return xe_pm_runtime_get_if_active(xe);
+ return xe_pm_runtime_get_if_in_use(xe);
+}
+
+static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
+{
+ struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
+
+ xe_pm_runtime_get_noresume(xe);
+ return true;
}
static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
@@ -217,7 +183,6 @@ struct i915_sched_attr {
#define RUNTIME_INFO(xe) (&(xe)->info.i915_runtime)
#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
-#define HPD_STORM_DEFAULT_THRESHOLD 50
#ifdef CONFIG_ARM64
/*
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h
deleted file mode 100644
index 12c671fd5235..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "../../i915/i915_fixed.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h
deleted file mode 100644
index 06b723a479c5..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __I915_GEM_H__
-#define __I915_GEM_H__
-#define GEM_BUG_ON
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
index bd233007c1b7..cb6c7598824b 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
@@ -17,10 +17,15 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
{
struct xe_bo *bo;
int err;
- u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT;
+ u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
- if (align)
+ if (start < SZ_4K)
+ start = SZ_4K;
+
+ if (align) {
size = ALIGN(size, align);
+ start = ALIGN(start, align);
+ }
bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
NULL, size, start, end,
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h
index 80b024d435dc..4931c7198f13 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h
@@ -9,36 +9,10 @@
#include <linux/types.h>
struct drm_i915_private;
-struct i915_ggtt;
-static inline void intel_vgpu_detect(struct drm_i915_private *i915)
-{
-}
static inline bool intel_vgpu_active(struct drm_i915_private *i915)
{
return false;
}
-static inline void intel_vgpu_register(struct drm_i915_private *i915)
-{
-}
-static inline bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915)
-{
- return false;
-}
-static inline bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915)
-{
- return false;
-}
-static inline bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915)
-{
- return false;
-}
-static inline int intel_vgt_balloon(struct i915_ggtt *ggtt)
-{
- return 0;
-}
-static inline void intel_vgt_deballoon(struct i915_ggtt *ggtt)
-{
-}
#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h
deleted file mode 100644
index 009745328992..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _INTEL_UC_FW_H_
-#define _INTEL_UC_FW_H_
-
-#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index cd26ddc0f69e..ef79793caa72 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -25,15 +25,15 @@ static inline u32 intel_uncore_read(struct intel_uncore *uncore,
return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}
-static inline u32 intel_uncore_read8(struct intel_uncore *uncore,
- i915_reg_t i915_reg)
+static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
+ i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
}
-static inline u32 intel_uncore_read16(struct intel_uncore *uncore,
+static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index b21da7b745a5..e18521acc516 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -11,7 +11,7 @@
void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
{
- if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
+ if (bo->flags & XE_BO_FLAG_PINNED) {
/* Unpin our kernel fb first */
xe_bo_lock(bo, false);
xe_bo_unpin(bo);
@@ -31,23 +31,27 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
if (ret)
- return ret;
+ goto err;
- if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+ if (!(bo->flags & XE_BO_FLAG_SCANOUT)) {
/*
- * XE_BO_SCANOUT_BIT should ideally be set at creation, or is
+ * XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
* automatically set when creating FB. We cannot change caching
* mode when the object is VM_BINDed, so we can only set
* coherency with display when unbound.
*/
if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
ttm_bo_unreserve(&bo->ttm);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
- bo->flags |= XE_BO_SCANOUT_BIT;
+ bo->flags |= XE_BO_FLAG_SCANOUT;
}
ttm_bo_unreserve(&bo->ttm);
+ return 0;
+err:
+ xe_bo_put(bo);
return ret;
}
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 51ae3561fd0d..9e4bcfdbc7e5 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -42,9 +42,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
if (!IS_DGFX(dev_priv)) {
obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv),
NULL, size,
- ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
- XE_BO_CREATE_STOLEN_BIT |
- XE_BO_CREATE_PINNED_BIT);
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_PINNED);
if (!IS_ERR(obj))
drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n");
else
@@ -52,9 +52,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
}
if (IS_ERR(obj)) {
obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size,
- ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
- XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
- XE_BO_CREATE_PINNED_BIT);
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
+ XE_BO_FLAG_PINNED);
}
if (IS_ERR(obj)) {
@@ -81,8 +81,8 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) {
- if (obj->flags & XE_BO_CREATE_STOLEN_BIT)
+ if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
+ if (obj->flags & XE_BO_FLAG_STOLEN)
info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0);
else
info->fix.smem_start =
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index e4db069f0db3..0de0566e5b39 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -51,14 +51,6 @@ bool xe_display_driver_probe_defer(struct pci_dev *pdev)
return intel_display_driver_probe_defer(pdev);
}
-static void xe_display_last_close(struct drm_device *dev)
-{
- struct xe_device *xe = to_xe_device(dev);
-
- if (xe->info.enable_display)
- intel_fbdev_restore_mode(to_xe_device(dev));
-}
-
/**
* xe_display_driver_set_hooks - Add driver flags and hooks for display
* @driver: DRM device driver
@@ -73,7 +65,6 @@ void xe_display_driver_set_hooks(struct drm_driver *driver)
return;
driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
- driver->lastclose = xe_display_last_close;
}
static void unset_display_features(struct xe_device *xe)
@@ -101,25 +92,14 @@ static void display_destroy(struct drm_device *dev, void *dummy)
*/
int xe_display_create(struct xe_device *xe)
{
- int err;
-
spin_lock_init(&xe->display.fb_tracking.lock);
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
drmm_mutex_init(&xe->drm, &xe->sb_lock);
- drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
- drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
- drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
- drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
- drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
xe->enabled_irq_mask = ~0;
- err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}
static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
@@ -223,9 +203,7 @@ void xe_display_fini(struct xe_device *xe)
if (!xe->info.enable_display)
return;
- /* poll work can call into fbdev, hence clean that up afterwards */
intel_hpd_poll_fini(xe);
- intel_fbdev_fini(xe);
intel_hdcp_component_fini(xe);
intel_audio_deinit(xe);
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index 27c2fb1c002a..44c9fd2143cc 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -45,8 +45,8 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
obj = xe_bo_create_pin_map(i915, xe_device_get_root_tile(i915),
NULL, PAGE_ALIGN(size),
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(obj)) {
kfree(vma);
return false;
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 722c84a56607..3e1ae37c4c8b 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -10,6 +10,7 @@
#include "intel_fb_pin.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
+#include "xe_pm.h"
#include <drm/ttm/ttm_bo.h>
@@ -30,7 +31,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
for (row = 0; row < height; row++) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_WB]);
+ xe->pat.idx[XE_CACHE_NONE]);
iosys_map_wr(map, *dpt_ofs, u64, pte);
*dpt_ofs += 8;
@@ -62,7 +63,7 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
for (column = 0; column < width; column++) {
iosys_map_wr(map, *dpt_ofs, u64,
pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_WB]));
+ xe->pat.idx[XE_CACHE_NONE]));
*dpt_ofs += 8;
src_idx++;
@@ -99,18 +100,21 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
if (IS_DGFX(xe))
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM0_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE);
else
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
- XE_BO_CREATE_STOLEN_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE);
if (IS_ERR(dpt))
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -119,7 +123,7 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
for (x = 0; x < size / XE_PAGE_SIZE; x++) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_WB]);
+ xe->pat.idx[XE_CACHE_NONE]);
iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
}
@@ -165,7 +169,7 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
for (row = 0; row < height; row++) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_WB]);
+ xe->pat.idx[XE_CACHE_NONE]);
xe_ggtt_set_pte(ggtt, *ggtt_ofs, pte);
*ggtt_ofs += XE_PAGE_SIZE;
@@ -190,7 +194,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
/* TODO: Consider sharing framebuffer mapping?
* embed i915_vma inside intel_framebuffer
*/
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
ret = mutex_lock_interruptible(&ggtt->lock);
if (ret)
goto out;
@@ -211,7 +215,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
for (x = 0; x < size; x += XE_PAGE_SIZE) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
- xe->pat.idx[XE_CACHE_WB]);
+ xe->pat.idx[XE_CACHE_NONE]);
xe_ggtt_set_pte(ggtt, vma->node.start + x, pte);
}
@@ -238,11 +242,10 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
rot_info->plane[i].dst_stride);
}
- xe_ggtt_invalidate(ggtt);
out_unlock:
mutex_unlock(&ggtt->lock);
out:
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(tile_to_xe(ggtt->tile));
return ret;
}
@@ -260,7 +263,7 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
- !(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) {
+ !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
struct xe_tile *tile = xe_device_get_root_tile(xe);
/*
@@ -321,7 +324,7 @@ static void __xe_unpin_fb_vma(struct i915_vma *vma)
xe_bo_unpin_map_no_vm(vma->dpt);
else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
vma->bo->ggtt_node.start != vma->node.start)
- xe_ggtt_remove_node(ggtt, &vma->node);
+ xe_ggtt_remove_node(ggtt, &vma->node, false);
ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
ttm_bo_unpin(&vma->bo->ttm);
@@ -353,7 +356,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct i915_vma *vma;
/* We reject creating !SCANOUT fb's, so this is weird.. */
- drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+ drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
if (IS_ERR(vma))
@@ -381,4 +384,4 @@ struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
void intel_dpt_destroy(struct i915_address_space *vm)
{
return;
-} \ No newline at end of file
+}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 0f11a39333e2..d46f87a039f2 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -3,32 +3,250 @@
* Copyright 2023, Intel Corporation.
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+#include <drm/i915_hdcp_interface.h>
+#include <linux/delay.h>
+
+#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
+#include "intel_hdcp_gsc_message.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_gsc_proxy.h"
+#include "xe_gsc_submit.h"
+#include "xe_gt.h"
+#include "xe_map.h"
+#include "xe_pm.h"
+#include "xe_uc_fw.h"
+
+#define HECI_MEADDRESS_HDCP 18
+
+struct intel_hdcp_gsc_message {
+ struct xe_bo *hdcp_bo;
+ u64 hdcp_cmd_in;
+ u64 hdcp_cmd_out;
+};
-bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
+#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
+
+bool intel_hdcp_gsc_cs_required(struct xe_device *xe)
{
- return true;
+ return DISPLAY_VER(xe) >= 14;
}
-bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915)
+bool intel_hdcp_gsc_check_status(struct xe_device *xe)
{
- return false;
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_gt *gt = tile->media_gt;
+ bool ret = true;
+
+ if (!xe_uc_fw_is_enabled(&gt->uc.gsc.fw))
+ return false;
+
+ xe_pm_runtime_get(xe);
+ if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
+ drm_dbg_kms(&xe->drm,
+ "failed to get forcewake to check proxy status\n");
+ ret = false;
+ goto out;
+ }
+
+ if (!xe_gsc_proxy_init_done(&gt->uc.gsc))
+ ret = false;
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+out:
+ xe_pm_runtime_put(xe);
+ return ret;
}
-int intel_hdcp_gsc_init(struct drm_i915_private *i915)
+/* This function allocates memory for the command that we will send to the GSC CS */
+static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
+ struct intel_hdcp_gsc_message *hdcp_message)
{
- drm_info(&i915->drm, "HDCP support not yet implemented\n");
- return -ENODEV;
+ struct xe_bo *bo = NULL;
+ u64 cmd_in, cmd_out;
+ int ret = 0;
+
+ /* allocate an object of two pages for HDCP command memory and store it */
+ bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
+
+ if (IS_ERR(bo)) {
+ drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
+ ret = PTR_ERR(bo);
+ goto out;
+ }
+
+ cmd_in = xe_bo_ggtt_addr(bo);
+ cmd_out = cmd_in + PAGE_SIZE;
+ xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
+
+ hdcp_message->hdcp_bo = bo;
+ hdcp_message->hdcp_cmd_in = cmd_in;
+ hdcp_message->hdcp_cmd_out = cmd_out;
+out:
+ return ret;
}
-void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
+static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
{
+ struct intel_hdcp_gsc_message *hdcp_message;
+ int ret;
+
+ hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
+
+ if (!hdcp_message)
+ return -ENOMEM;
+
+ /*
+ * NOTE: No need to lock the comp mutex here as it is already
+ * going to be taken before this function is called
+ */
+ ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message);
+ if (ret) {
+ drm_err(&xe->drm, "Could not initialize hdcp_message\n");
+ kfree(hdcp_message);
+ return ret;
+ }
+
+ xe->display.hdcp.hdcp_message = hdcp_message;
+ return ret;
}
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+ .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ intel_hdcp_gsc_verify_receiver_cert_prepare_km,
+ .verify_hprime = intel_hdcp_gsc_verify_hprime,
+ .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
+ .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
+ .verify_lprime = intel_hdcp_gsc_verify_lprime,
+ .get_session_key = intel_hdcp_gsc_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ intel_hdcp_gsc_repeater_check_flow_prepare_ack,
+ .verify_mprime = intel_hdcp_gsc_verify_mprime,
+ .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
+ .close_hdcp_session = intel_hdcp_gsc_close_session,
+};
+
+int intel_hdcp_gsc_init(struct xe_device *xe)
+{
+ struct i915_hdcp_arbiter *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_lock(&xe->display.hdcp.hdcp_mutex);
+ xe->display.hdcp.arbiter = data;
+ xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev;
+ xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
+ ret = intel_hdcp_gsc_hdcp2_init(xe);
+ if (ret)
+ kfree(data);
+
+ mutex_unlock(&xe->display.hdcp.hdcp_mutex);
+
+ return ret;
+}
+
+void intel_hdcp_gsc_fini(struct xe_device *xe)
+{
+ struct intel_hdcp_gsc_message *hdcp_message =
+ xe->display.hdcp.hdcp_message;
+
+ if (!hdcp_message)
+ return;
+
+ xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
+ kfree(hdcp_message);
+}
+
+static int xe_gsc_send_sync(struct xe_device *xe,
+ struct intel_hdcp_gsc_message *hdcp_message,
+ u32 msg_size_in, u32 msg_size_out,
+ u32 addr_out_off)
+{
+ struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
+ struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
+ struct xe_gsc *gsc = &gt->uc.gsc;
+ int ret;
+
+ ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
+ hdcp_message->hdcp_cmd_out, msg_size_out);
+ if (ret) {
+ drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
+ return ret;
+ }
+
+ if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off))
+ return -EAGAIN;
+
+ ret = xe_gsc_read_out_header(xe, map, addr_out_off,
+ sizeof(struct hdcp_cmd_header), NULL);
+
+ return ret;
+}
+
+ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
size_t msg_in_len, u8 *msg_out,
size_t msg_out_len)
{
- return -ENODEV;
+ const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
+ struct intel_hdcp_gsc_message *hdcp_message;
+ u64 host_session_id;
+ u32 msg_size_in, msg_size_out;
+ u32 addr_out_off, addr_in_wr_off = 0;
+ int ret, tries = 0;
+
+ if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
+ msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
+ hdcp_message = xe->display.hdcp.hdcp_message;
+ addr_out_off = PAGE_SIZE;
+
+ host_session_id = xe_gsc_create_host_session_id();
+ xe_pm_runtime_get_noresume(xe);
+ addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
+ addr_in_wr_off, HECI_MEADDRESS_HDCP,
+ host_session_id, msg_in_len);
+ xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
+ msg_in, msg_in_len);
+ /*
+ * Keep sending the request while the pending bit is set. There is no
+ * need to add a message handle: we reuse the same address, so the
+ * location of the header is the same and it already contains the
+ * handle. We send the message up to 20 times, 50 ms apart.
+ */
+ do {
+ ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
+ addr_out_off);
+
+ /* Only try again if gsc says so */
+ if (ret != -EAGAIN)
+ break;
+
+ msleep(50);
+
+ } while (++tries < 20);
+
+ if (ret)
+ goto out;
+
+ xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
+ addr_out_off + HDCP_GSC_HEADER_SIZE,
+ msg_out_len);
+
+out:
+ xe_pm_runtime_put(xe);
+ return ret;
}
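
For reference, the two-page buffer layout the send path assumes, as established by intel_hdcp_gsc_initialize_message() above (offsets derived from the code; widths not to scale):

	offset 0                       PAGE_SIZE                    2 * PAGE_SIZE
	+--------------+---------------+--------------+---------------+
	| GSC header   | msg_in bytes  | GSC header   | msg_out bytes |
	+--------------+---------------+--------------+---------------+
	^ hdcp_cmd_in                  ^ hdcp_cmd_out (addr_out_off)

msg_out is copied back from addr_out_off + HDCP_GSC_HEADER_SIZE, matching the xe_map_memcpy_from() at the end of intel_hdcp_gsc_msg_send().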
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 866d1dd6eeb4..9693c56d386b 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -6,6 +6,7 @@
/* for ioread64 */
#include <linux/io-64-nonatomic-lo-hi.h>
+#include "regs/xe_gtt_defs.h"
#include "xe_ggtt.h"
#include "i915_drv.h"
@@ -62,7 +63,7 @@ initial_plane_bo(struct xe_device *xe,
if (plane_config->size == 0)
return NULL;
- flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;
+ flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
@@ -79,7 +80,7 @@ initial_plane_bo(struct xe_device *xe,
}
phys_base = pte & ~(page_size - 1);
- flags |= XE_BO_CREATE_VRAM0_BIT;
+ flags |= XE_BO_FLAG_VRAM0;
/*
* We don't currently expect this to ever be placed in the
@@ -101,7 +102,7 @@ initial_plane_bo(struct xe_device *xe,
if (!stolen)
return NULL;
phys_base = base;
- flags |= XE_BO_CREATE_STOLEN_BIT;
+ flags |= XE_BO_FLAG_STOLEN;
/*
* If the FB is too big, just don't use it since fbdev is not very
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfx_state_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfx_state_commands.h
new file mode 100644
index 000000000000..dca62af5a5d5
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_gfx_state_commands.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GFX_STATE_COMMANDS_H_
+#define _XE_GFX_STATE_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+#define GFX_STATE_OPCODE REG_GENMASK(28, 26)
+
+#define GFX_STATE_CMD(opcode) \
+ (XE_INSTR_GFX_STATE | REG_FIELD_PREP(GFX_STATE_OPCODE, opcode))
+
+#define STATE_WRITE_INLINE GFX_STATE_CMD(0x0)
+
+#endif
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
index 8e6dd061f2ae..31d28a67ef6a 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
@@ -47,6 +47,8 @@
#define GPGPU_CSR_BASE_ADDRESS GFXPIPE_COMMON_CMD(0x1, 0x4)
#define STATE_COMPUTE_MODE GFXPIPE_COMMON_CMD(0x1, 0x5)
#define CMD_3DSTATE_BTD GFXPIPE_COMMON_CMD(0x1, 0x6)
+#define STATE_SYSTEM_MEM_FENCE_ADDRESS GFXPIPE_COMMON_CMD(0x1, 0x9)
+#define STATE_CONTEXT_DATA_BASE_ADDRESS GFXPIPE_COMMON_CMD(0x1, 0xB)
#define CMD_3DSTATE_VF_STATISTICS GFXPIPE_SINGLE_DW_CMD(0x0, 0xB)
@@ -71,6 +73,7 @@
#define CMD_3DSTATE_WM GFXPIPE_3D_CMD(0x0, 0x14)
#define CMD_3DSTATE_CONSTANT_VS GFXPIPE_3D_CMD(0x0, 0x15)
#define CMD_3DSTATE_CONSTANT_GS GFXPIPE_3D_CMD(0x0, 0x16)
+#define CMD_3DSTATE_CONSTANT_PS GFXPIPE_3D_CMD(0x0, 0x17)
#define CMD_3DSTATE_SAMPLE_MASK GFXPIPE_3D_CMD(0x0, 0x18)
#define CMD_3DSTATE_CONSTANT_HS GFXPIPE_3D_CMD(0x0, 0x19)
#define CMD_3DSTATE_CONSTANT_DS GFXPIPE_3D_CMD(0x0, 0x1A)
diff --git a/drivers/gpu/drm/xe/instructions/xe_instr_defs.h b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
index 04179b2a48e1..fd2ce7ace510 100644
--- a/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
+++ b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
@@ -17,6 +17,7 @@
#define XE_INSTR_MI REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x0)
#define XE_INSTR_GSC REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x2)
#define XE_INSTR_GFXPIPE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x3)
+#define XE_INSTR_GFX_STATE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x4)
/*
* Most (but not all) instructions have a "length" field in the instruction
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 0b1266c88a6a..af71b87d8030 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -104,9 +104,6 @@
#define FF_SLICE_CS_CHICKEN1(base) XE_REG((base) + 0xe0, XE_REG_OPTION_MASKED)
#define FFSC_PERCTX_PREEMPT_CTRL REG_BIT(14)
-#define FF_SLICE_CS_CHICKEN2(base) XE_REG((base) + 0xe4, XE_REG_OPTION_MASKED)
-#define PERF_FIX_BALANCING_CFE_DISABLE REG_BIT(15)
-
#define CS_DEBUG_MODE1(base) XE_REG((base) + 0xec, XE_REG_OPTION_MASKED)
#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
#define REPLAY_MODE_GRANULARITY REG_BIT(0)
@@ -125,7 +122,7 @@
#define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234)
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244)
+#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index 9886ec9cb08e..e2a925be137c 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -38,4 +38,11 @@
#define HECI_H_GS1(base) XE_REG((base) + 0xc4c)
#define HECI_H_GS1_ER_PREP REG_BIT(0)
+#define GSCI_TIMER_STATUS XE_REG(0x11ca28)
+#define GSCI_TIMER_STATUS_VALUE REG_GENMASK(1, 0)
+#define GSCI_TIMER_STATUS_RESET_IN_PROGRESS 0
+#define GSCI_TIMER_STATUS_TIMER_EXPIRED 1
+#define GSCI_TIMER_STATUS_RESET_COMPLETE 2
+#define GSCI_TIMER_STATUS_OUT_OF_RESET 3
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 15ac2d284d48..94445810ccc9 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -69,10 +69,14 @@
#define XEHP_TILE_ADDR_RANGE(_idx) XE_REG_MCR(0x4900 + (_idx) * 4)
#define XEHP_FLAT_CCS_BASE_ADDR XE_REG_MCR(0x4910)
+#define XEHP_FLAT_CCS_PTR REG_GENMASK(31, 8)
#define WM_CHICKEN3 XE_REG_MCR(0x5588, XE_REG_OPTION_MASKED)
#define HIZ_PLANE_COMPRESSION_DIS REG_BIT(10)
+#define CHICKEN_RASTER_1 XE_REG_MCR(0x6204, XE_REG_OPTION_MASKED)
+#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
+
#define CHICKEN_RASTER_2 XE_REG_MCR(0x6208, XE_REG_OPTION_MASKED)
#define TBIMR_FAST_CLIP REG_BIT(5)
@@ -97,7 +101,8 @@
#define CACHE_MODE_1 XE_REG(0x7004, XE_REG_OPTION_MASKED)
#define MSAA_OPTIMIZATION_REDUC_DISABLE REG_BIT(11)
-#define COMMON_SLICE_CHICKEN1 XE_REG(0x7010)
+#define COMMON_SLICE_CHICKEN1 XE_REG(0x7010, XE_REG_OPTION_MASKED)
+#define DISABLE_BOTTOM_CLIP_RECTANGLE_TEST REG_BIT(14)
#define HIZ_CHICKEN XE_REG(0x7018, XE_REG_OPTION_MASKED)
#define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14)
@@ -141,6 +146,10 @@
#define XE2_FLAT_CCS_BASE_RANGE_LOWER XE_REG_MCR(0x8800)
#define XE2_FLAT_CCS_ENABLE REG_BIT(0)
+#define XE2_FLAT_CCS_BASE_LOWER_ADDR_MASK REG_GENMASK(31, 6)
+
+#define XE2_FLAT_CCS_BASE_RANGE_UPPER XE_REG_MCR(0x8804)
+#define XE2_FLAT_CCS_BASE_UPPER_ADDR_MASK REG_GENMASK(7, 0)
#define GSCPSMI_BASE XE_REG(0x880c)
@@ -156,7 +165,10 @@
#define MIRROR_FUSE3 XE_REG(0x9118)
#define XE2_NODE_ENABLE_MASK REG_GENMASK(31, 16)
#define L3BANK_PAIR_COUNT 4
+#define XEHPC_GT_L3_MODE_MASK REG_GENMASK(7, 4)
+#define XE2_GT_L3_MODE_MASK REG_GENMASK(7, 4)
#define L3BANK_MASK REG_GENMASK(3, 0)
+#define XELP_GT_L3_MODE_MASK REG_GENMASK(7, 0)
/* on Xe_HP the same fuses indicates mslices instead of L3 banks */
#define MAX_MSLICES 4
#define MEML3_EN_MASK REG_GENMASK(3, 0)
@@ -271,6 +283,10 @@
#define FORCEWAKE_GT XE_REG(0xa188)
#define PG_ENABLE XE_REG(0xa210)
+#define VD2_MFXVDENC_POWERGATE_ENABLE REG_BIT(8)
+#define VD2_HCP_POWERGATE_ENABLE REG_BIT(7)
+#define VD0_MFXVDENC_POWERGATE_ENABLE REG_BIT(4)
+#define VD0_HCP_POWERGATE_ENABLE REG_BIT(3)
#define CTC_MODE XE_REG(0xa26c)
#define CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
@@ -349,6 +365,7 @@
#define THREAD_EX_ARB_MODE_RR_AFTER_DEP REG_FIELD_PREP(THREAD_EX_ARB_MODE, 0x2)
#define ROW_CHICKEN3 XE_REG_MCR(0xe49c, XE_REG_OPTION_MASKED)
+#define XE2_EUPEND_CHK_FLUSH_DIS REG_BIT(14)
#define DIS_FIX_EOT1_FLUSH REG_BIT(9)
#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
@@ -364,17 +381,22 @@
#define DISABLE_EARLY_READ REG_BIT(14)
#define ENABLE_LARGE_GRF_MODE REG_BIT(12)
#define PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define DISABLE_TDL_SVHS_GATING REG_BIT(1)
#define DISABLE_DOP_GATING REG_BIT(0)
#define RT_CTRL XE_REG_MCR(0xe530)
#define DIS_NULL_QUERY REG_BIT(10)
+#define EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK XE_REG_MCR(0xe534)
+#define EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT REG_BIT(31)
+
#define XEHP_HDC_CHICKEN0 XE_REG_MCR(0xe5f0, XE_REG_OPTION_MASKED)
#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
#define DIS_ATOMIC_CHAINING_TYPED_WRITES REG_BIT(3)
#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
+#define WR_REQ_CHAINING_DIS REG_BIT(26)
#define TGM_WRITE_EOM_FORCE REG_BIT(17)
#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
#define SEQUENTIAL_ACCESS_UPGRADE_DISABLE REG_BIT(13)
@@ -439,7 +461,13 @@
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define VOLTAGE_MASK REG_GENMASK(10, 0)
-#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4))
+/*
+ * Note: Interrupt registers 1900xx are VF accessible only until version 12.50.
+ * On newer platforms, VFs use memory-based interrupts instead.
+ * However, for simplicity we keep this XE_REG_OPTION_VF tag intact.
+ */
+
+#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF)
#define INTR_GSC REG_BIT(31)
#define INTR_GUC REG_BIT(25)
#define INTR_MGUC REG_BIT(24)
@@ -450,16 +478,16 @@
#define INTR_VECS(x) REG_BIT(31 - (x))
#define INTR_VCS(x) REG_BIT(x)
-#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030)
-#define VCS_VECS_INTR_ENABLE XE_REG(0x190034)
-#define GUC_SG_INTR_ENABLE XE_REG(0x190038)
+#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF)
+#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF)
+#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF)
#define ENGINE1_MASK REG_GENMASK(31, 16)
#define ENGINE0_MASK REG_GENMASK(15, 0)
-#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c)
-#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044)
-#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048)
+#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF)
+#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF)
+#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF)
-#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4))
+#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF)
#define INTR_DATA_VALID REG_BIT(31)
#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x)
#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
@@ -468,16 +496,16 @@
#define OTHER_GSC_HECI2_INSTANCE 3
#define OTHER_GSC_INSTANCE 6
-#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4))
-#define RCS0_RSVD_INTR_MASK XE_REG(0x190090)
-#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0)
-#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8)
-#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac)
-#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0)
+#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF)
+#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF)
+#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF)
+#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF)
+#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF)
+#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF)
#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
-#define GUC_SG_INTR_MASK XE_REG(0x1900e8)
-#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec)
-#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4)
+#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
+#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
+#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF)
#define CCS0_CCS1_INTR_MASK XE_REG(0x190100)
#define CCS2_CCS3_INTR_MASK XE_REG(0x190104)
#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110)
@@ -486,6 +514,7 @@
#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c)
#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
+#define GSC_ER_COMPLETE REG_BIT(5)
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
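
As an aside, the L3 fuse fields added above are plain GENMASK ranges over MIRROR_FUSE3. Below is a minimal standalone sketch of how such fields are typically decoded; GENMASK32 and FIELD_GET32 are simplified local stand-ins for the kernel's REG_GENMASK()/REG_FIELD_GET(), and the fuse value is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's REG_GENMASK()/REG_FIELD_GET(). */
#define GENMASK32(h, l)		((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_GET32(mask, val)	(((val) & (mask)) >> __builtin_ctz(mask))

#define L3BANK_MASK		GENMASK32(3, 0)
#define XELP_GT_L3_MODE_MASK	GENMASK32(7, 0)

int main(void)
{
	uint32_t fuse3 = 0x35;	/* hypothetical MIRROR_FUSE3 readout */

	printf("l3 banks: 0x%x\n", FIELD_GET32(L3BANK_MASK, fuse3));
	printf("l3 mode:  0x%x\n", FIELD_GET32(XELP_GT_L3_MODE_MASK, fuse3));
	return 0;
}
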
diff --git a/drivers/gpu/drm/xe/regs/xe_gtt_defs.h b/drivers/gpu/drm/xe/regs/xe_gtt_defs.h
new file mode 100644
index 000000000000..4389e5a76f89
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_gtt_defs.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GTT_DEFS_H_
+#define _XE_GTT_DEFS_H_
+
+#define XELPG_GGTT_PTE_PAT0 BIT_ULL(52)
+#define XELPG_GGTT_PTE_PAT1 BIT_ULL(53)
+
+#define GGTT_PTE_VFID GENMASK_ULL(11, 2)
+
+#define GUC_GGTT_TOP 0xFEE00000
+
+#define XELPG_PPGTT_PTE_PAT3 BIT_ULL(62)
+#define XE2_PPGTT_PTE_PAT4 BIT_ULL(61)
+#define XE_PPGTT_PDE_PDPE_PAT2 BIT_ULL(12)
+#define XE_PPGTT_PTE_PAT2 BIT_ULL(7)
+#define XE_PPGTT_PTE_PAT1 BIT_ULL(4)
+#define XE_PPGTT_PTE_PAT0 BIT_ULL(3)
+
+#define XE_PDE_PS_2M BIT_ULL(7)
+#define XE_PDPE_PS_1G BIT_ULL(7)
+#define XE_PDE_IPS_64K BIT_ULL(11)
+
+#define XE_GGTT_PTE_DM BIT_ULL(1)
+#define XE_USM_PPGTT_PTE_AE BIT_ULL(10)
+#define XE_PPGTT_PTE_DM BIT_ULL(11)
+#define XE_PDE_64K BIT_ULL(6)
+#define XE_PTE_PS64 BIT_ULL(8)
+#define XE_PTE_NULL BIT_ULL(9)
+
+#define XE_PAGE_PRESENT BIT_ULL(0)
+#define XE_PAGE_RW BIT_ULL(1)
+
+#endif
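
The PTE/PDE bits collected in this new header compose by simple OR into 64-bit page-table entries. A standalone sketch under that assumption follows; encode_pte() is an invented illustration rather than the driver's actual PTE encoding path, with the bit values copied from the header above.

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)		(1ULL << (n))

/* Same values as xe_gtt_defs.h, redefined locally for a standalone build. */
#define XE_PAGE_PRESENT		BIT_ULL(0)
#define XE_PAGE_RW		BIT_ULL(1)
#define XE_PPGTT_PTE_PAT0	BIT_ULL(3)

/* Hypothetical encoder: page-aligned address plus access/PAT bits. */
static uint64_t encode_pte(uint64_t paddr, int writable, int pat0)
{
	uint64_t pte = paddr & ~0xfffULL;	/* low 12 bits carry flags */

	pte |= XE_PAGE_PRESENT;
	if (writable)
		pte |= XE_PAGE_RW;
	if (pat0)
		pte |= XE_PPGTT_PTE_PAT0;
	return pte;
}

int main(void)
{
	printf("pte = 0x%016llx\n",
	       (unsigned long long)encode_pte(0x1234000, 1, 1));
	return 0;
}
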
diff --git a/drivers/gpu/drm/xe/regs/xe_guc_regs.h b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
index 92320bbc9d3d..11682e675e0f 100644
--- a/drivers/gpu/drm/xe/regs/xe_guc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
@@ -100,16 +100,23 @@
#define GT_PM_CONFIG XE_REG(0x13816c)
#define GT_DOORBELL_ENABLE REG_BIT(0)
-#define GUC_HOST_INTERRUPT XE_REG(0x1901f0)
+#define GUC_HOST_INTERRUPT XE_REG(0x1901f0, XE_REG_OPTION_VF)
-#define VF_SW_FLAG(n) XE_REG(0x190240 + (n) * 4)
+#define VF_SW_FLAG(n) XE_REG(0x190240 + (n) * 4, XE_REG_OPTION_VF)
#define VF_SW_FLAG_COUNT 4
-#define MED_GUC_HOST_INTERRUPT XE_REG(0x190304)
+#define MED_GUC_HOST_INTERRUPT XE_REG(0x190304, XE_REG_OPTION_VF)
-#define MED_VF_SW_FLAG(n) XE_REG(0x190310 + (n) * 4)
+#define MED_VF_SW_FLAG(n) XE_REG(0x190310 + (n) * 4, XE_REG_OPTION_VF)
#define MED_VF_SW_FLAG_COUNT 4
+#define GUC_TLB_INV_CR XE_REG(0xcee8)
+#define GUC_TLB_INV_CR_INVALIDATE REG_BIT(0)
+#define PVC_GUC_TLB_INV_DESC0 XE_REG(0xcf7c)
+#define PVC_GUC_TLB_INV_DESC0_VALID REG_BIT(0)
+#define PVC_GUC_TLB_INV_DESC1 XE_REG(0xcf80)
+#define PVC_GUC_TLB_INV_DESC1_INVALIDATE REG_BIT(6)
+
/* GuC Interrupt Vector */
#define GUC_INTR_GUC2HOST REG_BIT(15)
#define GUC_INTR_EXEC_ERROR REG_BIT(14)
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index c50e7650c09a..23f7dc5bbe99 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -6,6 +6,8 @@
#ifndef _XE_REG_DEFS_H_
#define _XE_REG_DEFS_H_
+#include <linux/build_bug.h>
+
#include "compat-i915-headers/i915_reg_defs.h"
/**
@@ -36,6 +38,10 @@ struct xe_reg {
*/
u32 mcr:1;
/**
+ * @vf: register is accessible from the Virtual Function.
+ */
+ u32 vf:1;
+ /**
* @ext: access MMIO extension space for current register.
*/
u32 ext:1;
@@ -44,6 +50,7 @@ struct xe_reg {
u32 raw;
};
};
+static_assert(sizeof(struct xe_reg) == sizeof(u32));
/**
* struct xe_reg_mcr - MCR register definition
@@ -76,6 +83,13 @@ struct xe_reg_mcr {
#define XE_REG_OPTION_MASKED .masked = 1
/**
+ * XE_REG_OPTION_VF - Register is "VF" accessible.
+ *
+ * To be used with XE_REG() and XE_REG_INITIALIZER().
+ */
+#define XE_REG_OPTION_VF .vf = 1
+
+/**
* XE_REG_INITIALIZER - Initializer for xe_reg_t.
* @r_: Register offset
* @...: Additional options like access mode. See struct xe_reg for available
@@ -117,4 +131,9 @@ struct xe_reg_mcr {
.__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \
})
+static inline bool xe_reg_is_valid(struct xe_reg r)
+{
+ return r.addr;
+}
+
#endif
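
The new vf:1 member shares the bitfield/union layout with masked, mcr and ext, and the added static_assert keeps the whole descriptor word-sized so it stays cheap to pass by value. Here is a standalone sketch of the same pattern with simplified fields; it mirrors, but is not, the driver's full struct.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct reg {
	union {
		struct {
			uint32_t addr:22;	/* register offset */
			uint32_t masked:1;	/* upper 16 bits are a write mask */
			uint32_t mcr:1;		/* multicast/replicated register */
			uint32_t vf:1;		/* accessible from a Virtual Function */
		};
		uint32_t raw;
	};
};
static_assert(sizeof(struct reg) == sizeof(uint32_t),
	      "descriptor must stay pointer-free and word-sized");

/* Designated-initializer options, mirroring XE_REG_OPTION_* (##__VA_ARGS__ is
 * the GNU extension the kernel also relies on). */
#define REG_OPTION_VF	.vf = 1
#define REG(r, ...)	((struct reg){ .addr = (r), ##__VA_ARGS__ })

int main(void)
{
	struct reg r = REG(0x190010, REG_OPTION_VF);

	printf("addr=0x%x vf=%u raw=0x%08x\n", r.addr, r.vf, r.raw);
	return 0;
}
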
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 2c214bb9b671..722fb6dbb72e 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -57,7 +57,7 @@
#define DG1_MSTR_IRQ REG_BIT(31)
#define DG1_MSTR_TILE(t) REG_BIT(t)
-#define GFX_MSTR_IRQ XE_REG(0x190010)
+#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF)
#define MASTER_IRQ REG_BIT(31)
#define GU_MISC_IRQ REG_BIT(29)
#define DISPLAY_IRQ REG_BIT(16)
diff --git a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
index 58a4e0fad1e1..617ddb84b7fa 100644
--- a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
@@ -14,4 +14,7 @@
#define LMEM_EN REG_BIT(31)
#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
+#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
+#define VF_CAP REG_BIT(0)
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
index 9d1d88af8b2f..8cf2367449d8 100644
--- a/drivers/gpu/drm/xe/tests/Makefile
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# "live" kunit tests
-obj-$(CONFIG_DRM_XE_KUNIT_TEST) += \
+obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o
+xe_live_test-y = xe_live_test_mod.o \
xe_bo_test.o \
xe_dma_buf_test.o \
xe_migrate_test.o \
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 3436fd9cf2b2..9f3c02826464 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -116,7 +116,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
int ret;
/* TODO: Sanity check */
- unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
if (IS_DGFX(xe))
kunit_info(test, "Testing vram id %u\n", tile->id);
@@ -163,7 +163,7 @@ static int ccs_test_run_device(struct xe_device *xe)
return 0;
}
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
for_each_tile(tile, xe, id) {
/* For igfx run only for primary tile */
@@ -172,7 +172,7 @@ static int ccs_test_run_device(struct xe_device *xe)
ccs_test_run_tile(xe, tile, test);
}
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
{
struct xe_bo *bo, *external;
- unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
struct xe_gt *__gt;
int err, i, id;
@@ -335,12 +335,12 @@ static int evict_test_run_device(struct xe_device *xe)
return 0;
}
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
for_each_tile(tile, xe, id)
evict_test_run_tile(xe, tile, test);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
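
Throughout these tests, xe_device_mem_access_get()/put() is replaced by xe_pm_runtime_get()/put(), keeping the same bracketed acquire/release discipline around hardware access. A toy refcount sketch of that pairing follows; dev_wake_get()/dev_wake_put() are invented local stand-ins, not the kernel's runtime-PM API.

#include <stdio.h>

static int wake_ref;	/* stand-in for a device's runtime-PM refcount */

static void dev_wake_get(void) { if (wake_ref++ == 0) puts("device resumed"); }
static void dev_wake_put(void) { if (--wake_ref == 0) puts("device suspended"); }

static void run_tile_test(int tile)
{
	printf("testing tile %d\n", tile);
}

int main(void)
{
	dev_wake_get();		/* hold the device awake for the whole run */
	for (int tile = 0; tile < 2; tile++)
		run_tile_test(tile);
	dev_wake_put();		/* balanced put lets the device suspend */
	return 0;
}
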
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c
index f408f17f2164..a324cde77db8 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo_test.c
@@ -19,8 +19,3 @@ static struct kunit_suite xe_bo_test_suite = {
};
kunit_test_suite(xe_bo_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_bo kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index 9f6d571d7fa9..e7f9b531c465 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -12,6 +12,7 @@
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
+#include "xe_pm.h"
static bool p2p_enabled(struct dma_buf_test_params *params)
{
@@ -36,14 +37,14 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
xe_bo_assert_held(imported);
mem_type = XE_PL_VRAM0;
- if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
/* No VRAM allowed */
mem_type = XE_PL_TT;
else if (params->force_different_devices && !p2p_enabled(params))
/* No P2P */
mem_type = XE_PL_TT;
else if (params->force_different_devices && !is_dynamic(params) &&
- (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+ (params->mem_mask & XE_BO_FLAG_SYSTEM))
/* Pin migrated to TT */
mem_type = XE_PL_TT;
@@ -93,7 +94,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* possible, saving a migration step as the transfer is likely
* just as fast from system memory.
*/
- if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
+ if (params->mem_mask & XE_BO_FLAG_SYSTEM)
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
else
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
@@ -115,17 +116,17 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
/* No VRAM on this device? */
if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
- (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ (params->mem_mask & XE_BO_FLAG_VRAM0))
return;
size = PAGE_SIZE;
- if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
+ if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
size = SZ_64K;
kunit_info(test, "running %s\n", __func__);
bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, XE_BO_CREATE_USER_BIT | params->mem_mask);
+ ttm_bo_type_device, params->mem_mask);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
@@ -148,7 +149,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
*/
if (params->force_different_devices &&
!p2p_enabled(params) &&
- !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ !(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
KUNIT_FAIL(test,
"xe_gem_prime_import() succeeded when it shouldn't have\n");
} else {
@@ -161,7 +162,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
/* Pinning in VRAM is not allowed. */
if (!is_dynamic(params) &&
params->force_different_devices &&
- !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+ !(params->mem_mask & XE_BO_FLAG_SYSTEM))
KUNIT_EXPECT_EQ(test, err, -EINVAL);
/* Otherwise only expect interrupts or success. */
else if (err && err != -EINTR && err != -ERESTARTSYS)
@@ -180,7 +181,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
PTR_ERR(import));
} else if (!params->force_different_devices ||
p2p_enabled(params) ||
- (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ (params->mem_mask & XE_BO_FLAG_SYSTEM)) {
/* Shouldn't fail if we can reuse the same bo, use p2p or use system */
KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
PTR_ERR(import));
@@ -203,52 +204,52 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
* gem object.
*/
static const struct dma_buf_test_params test_params[] = {
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0},
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &nop2p_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM},
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.force_different_devices = true},
{}
@@ -259,6 +260,7 @@ static int dma_buf_run_device(struct xe_device *xe)
const struct dma_buf_test_params *params;
struct kunit *test = xe_cur_kunit();
+ xe_pm_runtime_get(xe);
for (params = test_params; params->mem_mask; ++params) {
struct dma_buf_test_params p = *params;
@@ -266,6 +268,7 @@ static int dma_buf_run_device(struct xe_device *xe)
test->priv = &p;
xe_test_dmabuf_import_same_driver(xe);
}
+ xe_pm_runtime_put(xe);
/* A non-zero return would halt iteration over driver devices */
return 0;
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
index 9f5a9cda8c0f..99cdb718b6c6 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
@@ -18,8 +18,3 @@ static struct kunit_suite xe_dma_buf_test_suite = {
};
kunit_test_suite(xe_dma_buf_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_dma_buf kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c b/drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c
new file mode 100644
index 000000000000..ee30a1939eb0
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+
+static int guc_id_mgr_test_init(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm;
+
+ xe_kunit_helper_xe_device_test_init(test);
+ idm = &xe_device_get_gt(test->priv, 0)->uc.guc.submission_state.idm;
+
+ mutex_init(idm_mutex(idm));
+ test->priv = idm;
+ return 0;
+}
+
+static void bad_init(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, xe_guc_id_mgr_init(idm, 0));
+ KUNIT_EXPECT_EQ(test, -ERANGE, xe_guc_id_mgr_init(idm, GUC_ID_MAX + 1));
+}
+
+static void no_init(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+
+ mutex_lock(idm_mutex(idm));
+ KUNIT_EXPECT_EQ(test, -ENODATA, xe_guc_id_mgr_reserve_locked(idm, 0));
+ mutex_unlock(idm_mutex(idm));
+
+ KUNIT_EXPECT_EQ(test, -ENODATA, xe_guc_id_mgr_reserve(idm, 1, 1));
+}
+
+static void init_fini(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, -1));
+ KUNIT_EXPECT_NOT_NULL(test, idm->bitmap);
+ KUNIT_EXPECT_EQ(test, idm->total, GUC_ID_MAX);
+ __fini_idm(NULL, idm);
+ KUNIT_EXPECT_NULL(test, idm->bitmap);
+ KUNIT_EXPECT_EQ(test, idm->total, 0);
+}
+
+static void check_used(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, 2));
+
+ mutex_lock(idm_mutex(idm));
+
+ for (n = 0; n < idm->total; n++) {
+ kunit_info(test, "n=%u", n);
+ KUNIT_EXPECT_EQ(test, idm->used, n);
+ KUNIT_EXPECT_GE(test, idm_reserve_chunk_locked(idm, 1, 0), 0);
+ KUNIT_EXPECT_EQ(test, idm->used, n + 1);
+ }
+ KUNIT_EXPECT_EQ(test, idm->used, idm->total);
+ idm_release_chunk_locked(idm, 0, idm->used);
+ KUNIT_EXPECT_EQ(test, idm->used, 0);
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+static void check_quota(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, 2));
+
+ mutex_lock(idm_mutex(idm));
+
+ for (n = 0; n < idm->total - 1; n++) {
+ kunit_info(test, "n=%u", n);
+ KUNIT_EXPECT_EQ(test, idm_reserve_chunk_locked(idm, 1, idm->total), -EDQUOT);
+ KUNIT_EXPECT_EQ(test, idm_reserve_chunk_locked(idm, 1, idm->total - n), -EDQUOT);
+ KUNIT_EXPECT_EQ(test, idm_reserve_chunk_locked(idm, idm->total - n, 1), -EDQUOT);
+ KUNIT_EXPECT_GE(test, idm_reserve_chunk_locked(idm, 1, 1), 0);
+ }
+ KUNIT_EXPECT_LE(test, 0, idm_reserve_chunk_locked(idm, 1, 0));
+ KUNIT_EXPECT_EQ(test, idm->used, idm->total);
+ idm_release_chunk_locked(idm, 0, idm->total);
+ KUNIT_EXPECT_EQ(test, idm->used, 0);
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+static void check_all(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, -1));
+
+ mutex_lock(idm_mutex(idm));
+
+ for (n = 0; n < idm->total; n++)
+ KUNIT_EXPECT_LE(test, 0, idm_reserve_chunk_locked(idm, 1, 0));
+ KUNIT_EXPECT_EQ(test, idm->used, idm->total);
+ for (n = 0; n < idm->total; n++)
+ idm_release_chunk_locked(idm, n, 1);
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+static struct kunit_case guc_id_mgr_test_cases[] = {
+ KUNIT_CASE(bad_init),
+ KUNIT_CASE(no_init),
+ KUNIT_CASE(init_fini),
+ KUNIT_CASE(check_used),
+ KUNIT_CASE(check_quota),
+ KUNIT_CASE_SLOW(check_all),
+ {}
+};
+
+static struct kunit_suite guc_id_mgr_suite = {
+ .name = "guc_idm",
+ .test_cases = guc_id_mgr_test_cases,
+
+ .init = guc_id_mgr_test_init,
+ .exit = NULL,
+};
+
+kunit_test_suites(&guc_id_mgr_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
new file mode 100644
index 000000000000..eb1ea99a5a8b
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe live kunit tests");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index c347e2c29f81..977d5f4e4490 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -10,6 +10,7 @@
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
+#include "xe_pm.h"
static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
const char *str, struct kunit *test)
@@ -112,7 +113,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
bo->size,
ttm_bo_type_kernel,
region |
- XE_BO_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
@@ -190,7 +191,7 @@ out_unlock:
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
struct kunit *test)
{
- test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT);
+ test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
}
static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
@@ -202,9 +203,9 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
return;
if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
- region = XE_BO_CREATE_VRAM1_BIT;
+ region = XE_BO_FLAG_VRAM1;
else
- region = XE_BO_CREATE_VRAM0_BIT;
+ region = XE_BO_FLAG_VRAM0;
test_copy(m, bo, test, region);
}
@@ -280,8 +281,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -289,8 +290,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -300,8 +301,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -423,17 +424,19 @@ static int migrate_test_run_device(struct xe_device *xe)
struct xe_tile *tile;
int id;
+ xe_pm_runtime_get(xe);
+
for_each_tile(tile, xe, id) {
struct xe_migrate *m = tile->migrate;
kunit_info(test, "Testing tile id %d.\n", id);
xe_vm_lock(m->q->vm, true);
- xe_device_mem_access_get(xe);
xe_migrate_sanity_test(m, test);
- xe_device_mem_access_put(xe);
xe_vm_unlock(m->q->vm);
}
+ xe_pm_runtime_put(xe);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
index cf0c173b945f..eb0d8963419c 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
@@ -18,8 +18,3 @@ static struct kunit_suite xe_migrate_test_suite = {
};
kunit_test_suite(xe_migrate_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_migrate kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index df5c36b70ab4..1b8617075b37 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -10,10 +10,11 @@
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
-#include "xe_pci.h"
+#include "xe_device.h"
#include "xe_gt.h"
#include "xe_mocs.h"
-#include "xe_device.h"
+#include "xe_pci.h"
+#include "xe_pm.h"
struct live_mocs {
struct xe_mocs_info table;
@@ -28,6 +29,8 @@ static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
flags = get_mocs_settings(gt_to_xe(gt), &arg->table);
+ kunit_info(test, "gt %d", gt->info.id);
+ kunit_info(test, "gt type %d", gt->info.type);
kunit_info(test, "table size %d", arg->table.size);
kunit_info(test, "table uc_index %d", arg->table.uc_index);
kunit_info(test, "table n_entries %d", arg->table.n_entries);
@@ -38,69 +41,72 @@ static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
static void read_l3cc_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
+ struct kunit *test = xe_cur_kunit();
+ u32 l3cc, l3cc_expected;
unsigned int i;
- u32 l3cc;
u32 reg_val;
u32 ret;
- struct kunit *test = xe_cur_kunit();
-
- xe_device_mem_access_get(gt_to_xe(gt));
ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
- mocs_dbg(&gt_to_xe(gt)->drm, "L3CC entries:%d\n", info->n_entries);
- for (i = 0;
- i < (info->n_entries + 1) / 2 ?
- (l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
- get_entry_l3cc(info, 2 * i + 1))), 1 : 0;
- i++) {
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
- reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
- else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
- mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i,
- XELP_LNCFCMOCS(i).addr, reg_val, l3cc);
- if (reg_val != l3cc)
- KUNIT_FAIL(test, "l3cc reg 0x%x has incorrect val.\n",
- XELP_LNCFCMOCS(i).addr);
+
+ for (i = 0; i < info->n_entries; i++) {
+ if (!(i & 1)) {
+ if (regs_are_mcr(gt))
+ reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i >> 1));
+ else
+ reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i >> 1));
+
+ mocs_dbg(gt, "reg_val=0x%x\n", reg_val);
+ } else {
+ /* Just re-use value read on previous iteration */
+ reg_val >>= 16;
+ }
+
+ l3cc_expected = get_entry_l3cc(info, i);
+ l3cc = reg_val & 0xffff;
+
+ mocs_dbg(gt, "[%u] expected=0x%x actual=0x%x\n",
+ i, l3cc_expected, l3cc);
+
+ KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc,
+ "l3cc idx=%u has incorrect val.\n", i);
}
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
- xe_device_mem_access_put(gt_to_xe(gt));
}
static void read_mocs_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- struct xe_device *xe = gt_to_xe(gt);
-
+ struct kunit *test = xe_cur_kunit();
+ u32 mocs, mocs_expected;
unsigned int i;
- u32 mocs;
u32 reg_val;
u32 ret;
- struct kunit *test = xe_cur_kunit();
+ KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index,
+ "Unused entries index should have been defined\n");
- xe_device_mem_access_get(gt_to_xe(gt));
ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
- mocs_dbg(&gt_to_xe(gt)->drm, "Global MOCS entries:%d\n", info->n_entries);
- drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
- "Unused entries index should have been defined\n");
- for (i = 0;
- i < info->n_entries ? (mocs = get_entry_control(info, i)), 1 : 0;
- i++) {
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+
+ for (i = 0; i < info->n_entries; i++) {
+ if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
- mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i,
- XELP_GLOBAL_MOCS(i).addr, reg_val, mocs);
- if (reg_val != mocs)
- KUNIT_FAIL(test, "mocs reg 0x%x has incorrect val.\n",
- XELP_GLOBAL_MOCS(i).addr);
+
+ mocs_expected = get_entry_control(info, i);
+ mocs = reg_val;
+
+ mocs_dbg(gt, "[%u] expected=0x%x actual=0x%x\n",
+ i, mocs_expected, mocs);
+
+ KUNIT_EXPECT_EQ_MSG(test, mocs_expected, mocs,
+ "mocs reg 0x%x has incorrect val.\n", i);
}
+
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
- xe_device_mem_access_put(gt_to_xe(gt));
}
static int mocs_kernel_test_run_device(struct xe_device *xe)
@@ -113,6 +119,8 @@ static int mocs_kernel_test_run_device(struct xe_device *xe)
unsigned int flags;
int id;
+ xe_pm_runtime_get(xe);
+
for_each_gt(gt, xe, id) {
flags = live_mocs_init(&mocs, gt);
if (flags & HAS_GLOBAL_MOCS)
@@ -120,6 +128,9 @@ static int mocs_kernel_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
}
+
+ xe_pm_runtime_put(xe);
+
return 0;
}
@@ -139,6 +150,8 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
int id;
struct kunit *test = xe_cur_kunit();
+ xe_pm_runtime_get(xe);
+
for_each_gt(gt, xe, id) {
flags = live_mocs_init(&mocs, gt);
kunit_info(test, "mocs_reset_test before reset\n");
@@ -156,6 +169,9 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
}
+
+ xe_pm_runtime_put(xe);
+
return 0;
}
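
The rewritten read_l3cc_table() depends on two 16-bit L3CC entries being packed per 32-bit LNCFCMOCS register: even indices do a fresh register read, odd indices reuse the previous value shifted down by 16. A standalone sketch of that unpacking, over a made-up register array instead of MMIO:

#include <stdint.h>
#include <stdio.h>

/* Pretend LNCFCMOCS registers: two 16-bit L3CC entries per 32-bit word. */
static const uint32_t lncfcmocs[] = { 0x00300010, 0x00700050 };

int main(void)
{
	uint32_t reg_val = 0;

	for (unsigned int i = 0; i < 4; i++) {
		if (!(i & 1))
			reg_val = lncfcmocs[i >> 1];	/* even: fresh read */
		else
			reg_val >>= 16;			/* odd: reuse high half */

		printf("l3cc[%u] = 0x%04x\n", i, reg_val & 0xffff);
	}
	return 0;
}
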
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
index ee40f31e1e12..6315886b659e 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
@@ -19,8 +19,3 @@ static struct kunit_suite xe_mocs_test_suite = {
};
kunit_test_suite(xe_mocs_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_mocs kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index 44570d888355..9d0c715142b9 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -71,6 +71,7 @@ static const struct platform_test_case cases[] = {
SUBPLATFORM_CASE(DG2, G12, A1),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
};
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 7c124475c428..541361caff3b 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -86,7 +86,8 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
};
xe_gt_assert(q->gt, second_idx <= bb->len);
- xe_gt_assert(q->gt, q->vm->flags & XE_VM_FLAG_MIGRATION);
+ xe_gt_assert(q->gt, xe_sched_job_is_migration(q));
+ xe_gt_assert(q->gt, q->width == 1);
return __xe_bb_create_job(q, bb, addr);
}
@@ -96,7 +97,8 @@ struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
{
u64 addr = xe_sa_bo_gpu_addr(bb->bo);
- xe_gt_assert(q->gt, !(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION));
+ xe_gt_assert(q->gt, !xe_sched_job_is_migration(q));
+ xe_gt_assert(q->gt, q->width == 1);
return __xe_bb_create_job(q, bb, &addr);
}
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 6603a0ea79c5..bc1f794e3e61 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -22,6 +22,7 @@
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_migrate.h"
+#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_res_cursor.h"
#include "xe_trace.h"
@@ -111,7 +112,7 @@ bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
static bool xe_bo_is_user(struct xe_bo *bo)
{
- return bo->flags & XE_BO_CREATE_USER_BIT;
+ return bo->flags & XE_BO_FLAG_USER;
}
static struct xe_migrate *
@@ -137,16 +138,13 @@ static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
+ if (bo_flags & XE_BO_FLAG_SYSTEM) {
xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_TT,
};
*c += 1;
-
- if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
- bo->props.preferred_mem_type = XE_PL_TT;
}
}
@@ -167,12 +165,12 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
* For eviction / restore on suspend / resume objects
* pinned in VRAM must be contiguous
*/
- if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
- XE_BO_CREATE_GGTT_BIT))
+ if (bo_flags & (XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_GGTT))
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
if (io_size < vram->usable_size) {
- if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
+ if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
place.fpfn = 0;
place.lpfn = io_size >> PAGE_SHIFT;
} else {
@@ -181,37 +179,27 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
}
places[*c] = place;
*c += 1;
-
- if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
- bo->props.preferred_mem_type = mem_type;
}
static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- if (bo->props.preferred_gt == XE_GT1) {
- if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
- if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
- } else {
- if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
- if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
- }
+ if (bo_flags & XE_BO_FLAG_VRAM0)
+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
+ if (bo_flags & XE_BO_FLAG_VRAM1)
+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
}
static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
+ if (bo_flags & XE_BO_FLAG_STOLEN) {
xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_STOLEN,
- .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
- XE_BO_CREATE_GGTT_BIT) ?
+ .flags = bo_flags & (XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_GGTT) ?
TTM_PL_FLAG_CONTIGUOUS : 0,
};
*c += 1;
@@ -223,17 +211,8 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
{
u32 c = 0;
- bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
-
- /* The order of placements should indicate preferred location */
-
- if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
- try_add_system(xe, bo, bo_flags, &c);
- try_add_vram(xe, bo, bo_flags, &c);
- } else {
- try_add_vram(xe, bo, bo_flags, &c);
- try_add_system(xe, bo, bo_flags, &c);
- }
+ try_add_vram(xe, bo, bo_flags, &c);
+ try_add_system(xe, bo, bo_flags, &c);
try_add_stolen(xe, bo, bo_flags, &c);
if (!c)
@@ -361,7 +340,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
break;
}
- WARN_ON((bo->flags & XE_BO_CREATE_USER_BIT) && !bo->cpu_caching);
+ WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
/*
* Display scanout is always non-coherent with the CPU cache.
@@ -369,8 +348,8 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
* For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
* require a CPU:WC mapping.
*/
- if ((!bo->cpu_caching && bo->flags & XE_BO_SCANOUT_BIT) ||
- (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
+ if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
+ (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
caching = ttm_write_combined;
err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
@@ -737,7 +716,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_assert(xe, migrate);
trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
/*
@@ -761,7 +740,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
ret = -EINVAL;
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
goto out;
}
@@ -779,7 +758,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
new_mem, handle_system_ccs);
if (IS_ERR(fence)) {
ret = PTR_ERR(fence);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
goto out;
}
if (!move_lacks_source) {
@@ -804,7 +783,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
dma_fence_put(fence);
}
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
out:
return ret;
@@ -816,7 +795,6 @@ out:
* @bo: The buffer object to move.
*
* On successful completion, the object memory will be moved to system memory.
- * This function blocks until the object has been fully moved.
*
* This is needed for special handling of pinned VRAM objects during
* suspend-resume.
@@ -873,9 +851,6 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (ret)
goto err_res_free;
- dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
- false, MAX_SCHEDULE_TIMEOUT);
-
return 0;
err_res_free:
@@ -888,7 +863,6 @@ err_res_free:
* @bo: The buffer object to move.
*
* On successful completion, the object memory will be moved back to VRAM.
- * This function blocks until the object has been fully moved.
*
* This is needed for special handling of pinned VRAM objects during
* suspend-resume.
@@ -930,9 +904,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
if (ret)
goto err_res_free;
- dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
- false, MAX_SCHEDULE_TIMEOUT);
-
return 0;
err_res_free:
@@ -1126,25 +1097,18 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
}
}
-static bool should_migrate_to_system(struct xe_bo *bo)
-{
- struct xe_device *xe = xe_bo_device(bo);
-
- return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
-}
-
static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
struct drm_device *ddev = tbo->base.dev;
struct xe_device *xe = to_xe_device(ddev);
struct xe_bo *bo = ttm_to_xe_bo(tbo);
- bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
+ bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
vm_fault_t ret;
- int idx, r = 0;
+ int idx;
if (needs_rpm)
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
ret = ttm_bo_vm_reserve(tbo, vmf);
if (ret)
@@ -1153,17 +1117,8 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
if (drm_dev_enter(ddev, &idx)) {
trace_xe_bo_cpu_fault(bo);
- if (should_migrate_to_system(bo)) {
- r = xe_bo_migrate(bo, XE_PL_TT);
- if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
- ret = VM_FAULT_NOPAGE;
- else if (r)
- ret = VM_FAULT_SIGBUS;
- }
- if (!ret)
- ret = ttm_bo_vm_fault_reserved(vmf,
- vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
@@ -1184,7 +1139,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
dma_resv_unlock(tbo->base.resv);
out:
if (needs_rpm)
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return ret;
}
@@ -1261,18 +1216,19 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
return ERR_PTR(-EINVAL);
}
- if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
- !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
- xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
+ if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
+ !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
+ ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
+ (flags & XE_BO_NEEDS_64K))) {
aligned_size = ALIGN(size, SZ_64K);
if (type != ttm_bo_type_device)
size = ALIGN(size, SZ_64K);
- flags |= XE_BO_INTERNAL_64K;
+ flags |= XE_BO_FLAG_INTERNAL_64K;
alignment = SZ_64K >> PAGE_SHIFT;
} else {
aligned_size = ALIGN(size, SZ_4K);
- flags &= ~XE_BO_INTERNAL_64K;
+ flags &= ~XE_BO_FLAG_INTERNAL_64K;
alignment = SZ_4K >> PAGE_SHIFT;
}
@@ -1291,9 +1247,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
bo->flags = flags;
bo->cpu_caching = cpu_caching;
bo->ttm.base.funcs = &xe_gem_object_funcs;
- bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
- bo->props.preferred_gt = XE_BO_PROPS_INVALID;
- bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
INIT_LIST_HEAD(&bo->pinned_link);
#ifdef CONFIG_PROC_FS
@@ -1304,11 +1257,11 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
if (resv) {
- ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
+ ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
ctx.resv = resv;
}
- if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
+ if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
if (WARN_ON(err)) {
xe_ttm_bo_destroy(&bo->ttm);
@@ -1318,7 +1271,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
/* Defer populating type_sg bos */
placement = (type == ttm_bo_type_sg ||
- bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
+ bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
&bo->placement;
err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
placement, alignment,
@@ -1373,21 +1326,21 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
{
struct ttm_place *place = bo->placements;
- if (flags & (XE_BO_CREATE_USER_BIT|XE_BO_CREATE_SYSTEM_BIT))
+ if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
return -EINVAL;
place->flags = TTM_PL_FLAG_CONTIGUOUS;
place->fpfn = start >> PAGE_SHIFT;
place->lpfn = end >> PAGE_SHIFT;
- switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
- case XE_BO_CREATE_VRAM0_BIT:
+ switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
+ case XE_BO_FLAG_VRAM0:
place->mem_type = XE_PL_VRAM0;
break;
- case XE_BO_CREATE_VRAM1_BIT:
+ case XE_BO_FLAG_VRAM1:
place->mem_type = XE_PL_VRAM1;
break;
- case XE_BO_CREATE_STOLEN_BIT:
+ case XE_BO_FLAG_STOLEN:
place->mem_type = XE_PL_STOLEN;
break;
@@ -1421,7 +1374,7 @@ __xe_bo_create_locked(struct xe_device *xe,
if (IS_ERR(bo))
return bo;
- flags |= XE_BO_FIXED_PLACEMENT_BIT;
+ flags |= XE_BO_FLAG_FIXED_PLACEMENT;
err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
if (err) {
xe_bo_free(bo);
@@ -1431,7 +1384,7 @@ __xe_bo_create_locked(struct xe_device *xe,
bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
vm && !xe_vm_in_fault_mode(vm) &&
- flags & XE_BO_CREATE_USER_BIT ?
+ flags & XE_BO_FLAG_USER ?
&vm->lru_bulk_move : NULL, size,
cpu_caching, type, flags);
if (IS_ERR(bo))
@@ -1448,13 +1401,13 @@ __xe_bo_create_locked(struct xe_device *xe,
xe_vm_get(vm);
bo->vm = vm;
- if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
- if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
+ if (bo->flags & XE_BO_FLAG_GGTT) {
+ if (!tile && flags & XE_BO_FLAG_STOLEN)
tile = xe_device_get_root_tile(xe);
xe_assert(xe, tile);
- if (flags & XE_BO_FIXED_PLACEMENT_BIT) {
+ if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
start + bo->size, U64_MAX);
} else {
@@ -1497,7 +1450,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
{
struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
cpu_caching, type,
- flags | XE_BO_CREATE_USER_BIT);
+ flags | XE_BO_FLAG_USER);
if (!IS_ERR(bo))
xe_bo_unlock_vm_held(bo);
@@ -1526,12 +1479,12 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
u64 start = offset == ~0ull ? 0 : offset;
u64 end = offset == ~0ull ? offset : start + size;
- if (flags & XE_BO_CREATE_STOLEN_BIT &&
+ if (flags & XE_BO_FLAG_STOLEN &&
xe_ttm_stolen_cpu_access_needs_ggtt(xe))
- flags |= XE_BO_CREATE_GGTT_BIT;
+ flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_NEEDS_CPU_ACCESS);
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo))
return bo;
@@ -1628,13 +1581,15 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
{
struct xe_bo *bo;
+ u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
+
+ dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, !(*src)->vmap.is_iomem);
- bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, (*src)->size,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
+ (*src)->size, dst_flags);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1709,8 +1664,8 @@ int xe_bo_pin(struct xe_bo *bo)
xe_assert(xe, !xe_bo_is_user(bo));
/* Pinned object must be in GGTT or have pinned flag */
- xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
- XE_BO_CREATE_GGTT_BIT));
+ xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_GGTT));
/*
* No reason we can't support pinning imported dma-bufs we just don't
@@ -1731,7 +1686,7 @@ int xe_bo_pin(struct xe_bo *bo)
* during suspend / resume (force restore to same physical address).
*/
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_INTERNAL_TEST)) {
+ bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
@@ -1799,7 +1754,7 @@ void xe_bo_unpin(struct xe_bo *bo)
xe_assert(xe, xe_bo_is_pinned(bo));
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_INTERNAL_TEST)) {
+ bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
@@ -1902,7 +1857,7 @@ int xe_bo_vmap(struct xe_bo *bo)
xe_bo_assert_held(bo);
- if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
+ if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
return -EINVAL;
if (!iosys_map_is_null(&bo->vmap))
@@ -1984,29 +1939,29 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
bo_flags = 0;
if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
- bo_flags |= XE_BO_DEFER_BACKING;
+ bo_flags |= XE_BO_FLAG_DEFER_BACKING;
if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
- bo_flags |= XE_BO_SCANOUT_BIT;
+ bo_flags |= XE_BO_FLAG_SCANOUT;
- bo_flags |= args->placement << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
+ bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
- if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
+ if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
return -EINVAL;
- bo_flags |= XE_BO_NEEDS_CPU_ACCESS;
+ bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
}
if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_CREATE_VRAM_MASK &&
+ if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_SCANOUT_BIT &&
+ if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
return -EINVAL;
@@ -2247,6 +2202,9 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
{
struct xe_device *xe = xe_bo_device(bo);
+ if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))
+ return false;
+
if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
return false;
@@ -2255,7 +2213,7 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
* can't be used since there's no CCS storage associated with
* non-VRAM addresses.
*/
- if (IS_DGFX(xe) && (bo->flags & XE_BO_CREATE_SYSTEM_BIT))
+ if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
return false;
return true;
@@ -2324,9 +2282,9 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
bo = xe_bo_create_user(xe, NULL, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
ttm_bo_type_device,
- XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT |
- XE_BO_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+ XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index c59ad15961ce..a885b14bf595 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -13,48 +13,34 @@
#include "xe_vm_types.h"
#include "xe_vm.h"
-/**
- * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
- * @vm: The vm
- */
-#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
-
-
-
#define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
-#define XE_BO_CREATE_USER_BIT BIT(0)
+#define XE_BO_FLAG_USER BIT(0)
/* The bits below need to be contiguous, or things break */
-#define XE_BO_CREATE_SYSTEM_BIT BIT(1)
-#define XE_BO_CREATE_VRAM0_BIT BIT(2)
-#define XE_BO_CREATE_VRAM1_BIT BIT(3)
-#define XE_BO_CREATE_VRAM_MASK (XE_BO_CREATE_VRAM0_BIT | \
- XE_BO_CREATE_VRAM1_BIT)
+#define XE_BO_FLAG_SYSTEM BIT(1)
+#define XE_BO_FLAG_VRAM0 BIT(2)
+#define XE_BO_FLAG_VRAM1 BIT(3)
+#define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
/* -- */
-#define XE_BO_CREATE_STOLEN_BIT BIT(4)
-#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
- (IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
- XE_BO_CREATE_SYSTEM_BIT)
-#define XE_BO_CREATE_GGTT_BIT BIT(5)
-#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT BIT(6)
-#define XE_BO_CREATE_PINNED_BIT BIT(7)
-#define XE_BO_CREATE_NO_RESV_EVICT BIT(8)
-#define XE_BO_DEFER_BACKING BIT(9)
-#define XE_BO_SCANOUT_BIT BIT(10)
-#define XE_BO_FIXED_PLACEMENT_BIT BIT(11)
-#define XE_BO_PAGETABLE BIT(12)
-#define XE_BO_NEEDS_CPU_ACCESS BIT(13)
-#define XE_BO_NEEDS_UC BIT(14)
+#define XE_BO_FLAG_STOLEN BIT(4)
+#define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \
+ XE_BO_FLAG_VRAM0 << (tile)->id : \
+ XE_BO_FLAG_SYSTEM)
+#define XE_BO_FLAG_GGTT BIT(5)
+#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
+#define XE_BO_FLAG_PINNED BIT(7)
+#define XE_BO_FLAG_NO_RESV_EVICT BIT(8)
+#define XE_BO_FLAG_DEFER_BACKING BIT(9)
+#define XE_BO_FLAG_SCANOUT BIT(10)
+#define XE_BO_FLAG_FIXED_PLACEMENT BIT(11)
+#define XE_BO_FLAG_PAGETABLE BIT(12)
+#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)
+#define XE_BO_FLAG_NEEDS_UC BIT(14)
+#define XE_BO_NEEDS_64K BIT(15)
+#define XE_BO_FLAG_GGTT_INVALIDATE BIT(16)
/* this one is triggered internally only */
-#define XE_BO_INTERNAL_TEST BIT(30)
-#define XE_BO_INTERNAL_64K BIT(31)
-
-#define XELPG_PPGTT_PTE_PAT3 BIT_ULL(62)
-#define XE2_PPGTT_PTE_PAT4 BIT_ULL(61)
-#define XE_PPGTT_PDE_PDPE_PAT2 BIT_ULL(12)
-#define XE_PPGTT_PTE_PAT2 BIT_ULL(7)
-#define XE_PPGTT_PTE_PAT1 BIT_ULL(4)
-#define XE_PPGTT_PTE_PAT0 BIT_ULL(3)
+#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
+#define XE_BO_FLAG_INTERNAL_64K BIT(31)
#define XE_PTE_SHIFT 12
#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
@@ -68,20 +54,6 @@
#define XE_64K_PTE_MASK (XE_64K_PAGE_SIZE - 1)
#define XE_64K_PDE_MASK (XE_PDE_MASK >> 4)
-#define XE_PDE_PS_2M BIT_ULL(7)
-#define XE_PDPE_PS_1G BIT_ULL(7)
-#define XE_PDE_IPS_64K BIT_ULL(11)
-
-#define XE_GGTT_PTE_DM BIT_ULL(1)
-#define XE_USM_PPGTT_PTE_AE BIT_ULL(10)
-#define XE_PPGTT_PTE_DM BIT_ULL(11)
-#define XE_PDE_64K BIT_ULL(6)
-#define XE_PTE_PS64 BIT_ULL(8)
-#define XE_PTE_NULL BIT_ULL(9)
-
-#define XE_PAGE_PRESENT BIT_ULL(0)
-#define XE_PAGE_RW BIT_ULL(1)
-
#define XE_PL_SYSTEM TTM_PL_SYSTEM
#define XE_PL_TT TTM_PL_TT
#define XE_PL_VRAM0 TTM_PL_VRAM
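
The "bits below need to be contiguous" requirement exists because the GEM create ioctl (see xe_gem_create_ioctl() above) shifts the user placement mask directly into this flag space via ffs(XE_BO_FLAG_SYSTEM). A standalone sketch of that packing, with the flag values copied from the header:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define XE_BO_FLAG_SYSTEM	(1u << 1)
#define XE_BO_FLAG_VRAM0	(1u << 2)
#define XE_BO_FLAG_VRAM1	(1u << 3)

int main(void)
{
	/* User-space placement mask: bit 0 = system, bit 1 = vram0, ... */
	unsigned int placement = 0x3;	/* system | vram0 */
	unsigned int bo_flags = 0;

	/* Works only because SYSTEM/VRAM0/VRAM1 are contiguous bits. */
	bo_flags |= placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);

	printf("bo_flags = 0x%x (system=%d vram0=%d vram1=%d)\n", bo_flags,
	       !!(bo_flags & XE_BO_FLAG_SYSTEM),
	       !!(bo_flags & XE_BO_FLAG_VRAM0),
	       !!(bo_flags & XE_BO_FLAG_VRAM1));
	return 0;
}
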
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 7a264a9ca06e..541b49007d73 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -146,7 +146,7 @@ int xe_bo_restore_kernel(struct xe_device *xe)
return ret;
}
- if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
+ if (bo->flags & XE_BO_FLAG_GGTT) {
struct xe_tile *tile = bo->tile;
mutex_lock(&tile->mem.ggtt->lock);
@@ -220,7 +220,7 @@ int xe_bo_restore_user(struct xe_device *xe)
list_splice_tail(&still_in_list, &xe->pinned.external_vram);
spin_unlock(&xe->pinned.lock);
- /* Wait for validate to complete */
+ /* Wait for restore to complete */
for_each_tile(tile, xe, id)
xe_tile_migrate_wait(tile);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 14ef13b7b421..86422e113d39 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -56,25 +56,6 @@ struct xe_bo {
*/
struct list_head client_link;
#endif
- /** @props: BO user controlled properties */
- struct {
- /** @preferred_mem: preferred memory class for this BO */
- s16 preferred_mem_class;
- /** @prefered_gt: preferred GT for this BO */
- s16 preferred_gt;
- /** @preferred_mem_type: preferred memory type */
- s32 preferred_mem_type;
- /**
- * @cpu_atomic: the CPU expects to do atomics operations to
- * this BO
- */
- bool cpu_atomic;
- /**
- * @device_atomic: the device expects to do atomics operations
- * to this BO
- */
- bool device_atomic;
- } props;
/** @freed: List node for delayed put. */
struct llist_node freed;
/** @created: Whether the bo has passed initial creation */
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 01db5b27bec5..0b7aebaae843 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -5,6 +5,7 @@
#include "xe_debugfs.h"
+#include <linux/debugfs.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
@@ -12,6 +13,8 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt_debugfs.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
#include "xe_step.h"
#ifdef CONFIG_DRM_XE_DEBUG
@@ -37,6 +40,8 @@ static int info(struct seq_file *m, void *data)
struct xe_gt *gt;
u8 id;
+ xe_pm_runtime_get(xe);
+
drm_printf(&p, "graphics_verx100 %d\n", xe->info.graphics_verx100);
drm_printf(&p, "media_verx100 %d\n", xe->info.media_verx100);
drm_printf(&p, "stepping G:%s M:%s D:%s B:%s\n",
@@ -63,11 +68,22 @@ static int info(struct seq_file *m, void *data)
gt->info.engine_mask);
}
+ xe_pm_runtime_put(xe);
+ return 0;
+}
+
+static int sriov_info(struct seq_file *m, void *data)
+{
+ struct xe_device *xe = node_to_xe(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_sriov_print_info(xe, &p);
return 0;
}
static const struct drm_info_list debugfs_list[] = {
{"info", info, 0},
+ { .name = "sriov_info", .show = sriov_info, },
};
static int forcewake_open(struct inode *inode, struct file *file)
@@ -76,8 +92,7 @@ static int forcewake_open(struct inode *inode, struct file *file)
struct xe_gt *gt;
u8 id;
- xe_device_mem_access_get(xe);
-
+ xe_pm_runtime_get(xe);
for_each_gt(gt, xe, id)
XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
@@ -92,8 +107,7 @@ static int forcewake_release(struct inode *inode, struct file *file)
for_each_gt(gt, xe, id)
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
@@ -127,7 +141,7 @@ void xe_debugfs_register(struct xe_device *xe)
if (man) {
char name[16];
- sprintf(name, "vram%d_mm", mem_type - XE_PL_VRAM0);
+ snprintf(name, sizeof(name), "vram%d_mm", mem_type - XE_PL_VRAM0);
ttm_resource_manager_create_debugfs(man, root, name);
}
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 68d3d623a05b..3d7980232be1 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -9,10 +9,13 @@
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
+#include <drm/drm_managed.h>
+
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_guc_ct.h"
#include "xe_guc_submit.h"
#include "xe_hw_engine.h"
@@ -64,9 +67,11 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
{
struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
- xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
- if (ss->vm)
- xe_vm_snapshot_capture_delayed(ss->vm);
+ /* keep going if fw fails as we still want to save the memory and SW data */
+ if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ xe_vm_snapshot_capture_delayed(ss->vm);
+ xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
}
@@ -74,17 +79,19 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
- struct xe_device *xe = coredump_to_xe(coredump);
- struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
+ struct xe_device *xe;
+ struct xe_devcoredump_snapshot *ss;
struct drm_printer p;
struct drm_print_iterator iter;
struct timespec64 ts;
int i;
- /* Our device is gone already... */
- if (!data || !coredump_to_xe(coredump))
+ if (!coredump)
return -ENODEV;
+ xe = coredump_to_xe(coredump);
+ ss = &coredump->snapshot;
+
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
@@ -117,10 +124,8 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
if (coredump->snapshot.hwe[i])
xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i],
&p);
- if (coredump->snapshot.vm) {
- drm_printf(&p, "\n**** VM state ****\n");
- xe_vm_snapshot_print(coredump->snapshot.vm, &p);
- }
+ drm_printf(&p, "\n**** VM state ****\n");
+ xe_vm_snapshot_print(coredump->snapshot.vm, &p);
return count - iter.remain;
}
@@ -180,10 +185,12 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
}
}
- xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ /* keep going if fw fails as we still want to save the memory and SW data */
+ if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL))
+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
- coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(job);
+ coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
coredump->snapshot.job = xe_sched_job_snapshot_capture(job);
coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm);
@@ -196,8 +203,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
}
- if (ss->vm)
- queue_work(system_unbound_wq, &ss->work);
+ queue_work(system_unbound_wq, &ss->work);
xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
dma_fence_end_signalling(cookie);
@@ -231,5 +237,14 @@ void xe_devcoredump(struct xe_sched_job *job)
dev_coredumpm(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
xe_devcoredump_read, xe_devcoredump_free);
}
-#endif
+static void xe_driver_devcoredump_fini(struct drm_device *drm, void *arg)
+{
+ dev_coredump_put(drm->dev);
+}
+
+int xe_devcoredump_init(struct xe_device *xe)
+{
+ return drmm_add_action_or_reset(&xe->drm, xe_driver_devcoredump_fini, xe);
+}
+#endif
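
xe_devcoredump_init() above leans on the drm-managed action helper, which either registers a release callback to run at drm_device teardown or, on registration failure, invokes it immediately (the "_or_reset" part). A sketch of the idiom under hypothetical names (my_init/my_fini):

static void my_fini(struct drm_device *drm, void *arg)
{
	/* undo whatever my_init() set up; arg is passed through verbatim */
}

static int my_init(struct xe_device *xe)
{
	/* ... acquire a resource ... */
	return drmm_add_action_or_reset(&xe->drm, my_fini, xe);
}
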
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index df8671f0b5eb..e2fa65ce0932 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -11,10 +11,16 @@ struct xe_sched_job;
#ifdef CONFIG_DEV_COREDUMP
void xe_devcoredump(struct xe_sched_job *job);
+int xe_devcoredump_init(struct xe_device *xe);
#else
static inline void xe_devcoredump(struct xe_sched_job *job)
{
}
+
+static inline int xe_devcoredump_init(struct xe_device *xe)
+{
+ return 0;
+}
#endif
#endif
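
The header keeps the usual kernel stub idiom: callers invoke xe_devcoredump_init() unconditionally and the empty inline compiles away when CONFIG_DEV_COREDUMP is off. In generic, hypothetical form:

#ifdef CONFIG_MY_FEATURE
int my_feature_init(struct my_device *dev);
#else
static inline int my_feature_init(struct my_device *dev) { return 0; }
#endif
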
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index ca85e81fdb44..5ef9b50a20d0 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -9,6 +9,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
@@ -20,6 +21,7 @@
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
+#include "xe_devcoredump.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
@@ -45,12 +47,6 @@
#include "xe_vm.h"
#include "xe_wait_user_fence.h"
-#ifdef CONFIG_LOCKDEP
-struct lockdep_map xe_device_mem_access_lockdep_map = {
- .name = "xe_device_mem_access_lockdep_map"
-};
-#endif
-
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -136,15 +132,48 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_RENDER_ALLOW),
};
+static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = file->private_data;
+ struct xe_device *xe = to_xe_device(file_priv->minor->dev);
+ long ret;
+
+ ret = xe_pm_runtime_get_ioctl(xe);
+ if (ret >= 0)
+ ret = drm_ioctl(file, cmd, arg);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = file->private_data;
+ struct xe_device *xe = to_xe_device(file_priv->minor->dev);
+ long ret;
+
+ ret = xe_pm_runtime_get_ioctl(xe);
+ if (ret >= 0)
+ ret = drm_compat_ioctl(file, cmd, arg);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+#else
+/* similarly to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
+#define xe_drm_compat_ioctl NULL
+#endif
+
static const struct file_operations xe_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release_noglobal,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = xe_drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
+ .compat_ioctl = xe_drm_compat_ioctl,
.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
.show_fdinfo = drm_show_fdinfo,
@@ -193,6 +222,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ if (xe->preempt_fence_wq)
+ destroy_workqueue(xe->preempt_fence_wq);
+
if (xe->ordered_wq)
destroy_workqueue(xe->ordered_wq);
@@ -258,9 +290,15 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
INIT_LIST_HEAD(&xe->pinned.external_vram);
INIT_LIST_HEAD(&xe->pinned.evicted);
+ xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
- if (!xe->ordered_wq || !xe->unordered_wq) {
+ if (!xe->ordered_wq || !xe->unordered_wq ||
+ !xe->preempt_fence_wq) {
+ /*
+ * Cleanup done in xe_device_destroy via
+		 * drmm_add_action_or_reset registered above.
+ */
drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
err = -ENOMEM;
goto err;
@@ -380,8 +418,70 @@ mask_err:
return err;
}
-/*
- * Initialize MMIO resources that don't require any knowledge about tile count.
+static bool verify_lmem_ready(struct xe_gt *gt)
+{
+ u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;
+
+ return !!val;
+}
+
+static int wait_for_lmem_ready(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ unsigned long timeout, start;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
+ if (verify_lmem_ready(gt))
+ return 0;
+
+ drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
+
+ start = jiffies;
+ timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */
+
+ do {
+ if (signal_pending(current))
+ return -EINTR;
+
+ /*
+		 * The boot firmware initializes local memory and
+		 * assesses its health. If memory training fails, the
+		 * punit will have been instructed to keep the GT powered
+		 * down and we won't be able to communicate with it.
+		 *
+		 * If the status check is done before the punit updates the
+		 * register, it can lead to the system being unusable.
+		 * Use a timeout and defer the probe to prevent this.
+ */
+ if (time_after(jiffies, timeout)) {
+ drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
+ return -EPROBE_DEFER;
+ }
+
+ msleep(20);
+
+ } while (!verify_lmem_ready(gt));
+
+ drm_dbg(&xe->drm, "lmem ready after %ums",
+ jiffies_to_msecs(jiffies - start));
+
+ return 0;
+}
+
+/**
+ * xe_device_probe_early - Device early probe
+ * @xe: xe device instance
+ *
+ * Initialize MMIO resources that don't require any
+ * knowledge about tile count. Also initialize pcode and
+ * check vram initialization on the root tile.
+ *
+ * Return: 0 on success, error code on failure
*/
int xe_device_probe_early(struct xe_device *xe)
{
@@ -391,7 +491,13 @@ int xe_device_probe_early(struct xe_device *xe)
if (err)
return err;
- err = xe_mmio_root_tile_init(xe);
+ xe_sriov_probe_early(xe);
+
+ err = xe_pcode_probe_early(xe);
+ if (err)
+ return err;
+
+ err = wait_for_lmem_ready(xe);
if (err)
return err;
@@ -469,15 +575,15 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
+ err = xe_devcoredump_init(xe);
+ if (err)
+ return err;
err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
if (err)
return err;
- for_each_gt(gt, xe, id) {
- err = xe_pcode_probe(gt);
- if (err)
- return err;
- }
+ for_each_gt(gt, xe, id)
+ xe_pcode_init(gt);
err = xe_display_init_noirq(xe);
if (err)
@@ -544,11 +650,7 @@ int xe_device_probe(struct xe_device *xe)
xe_hwmon_register(xe);
- err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
err_fini_display:
xe_display_driver_remove(xe);
@@ -612,87 +714,20 @@ u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}
-bool xe_device_mem_access_ongoing(struct xe_device *xe)
-{
- if (xe_pm_read_callback_task(xe) != NULL)
- return true;
-
- return atomic_read(&xe->mem_access.ref);
-}
-
+/**
+ * xe_device_assert_mem_access - Inspect the current runtime_pm state.
+ * @xe: xe device instance
+ *
+ * To be used before any kind of memory access. It will splat a debug warning
+ * if the device is currently sleeping, but it does not guarantee in any way
+ * that the device will remain awake. The Xe PM runtime get and put functions
+ * should bracket the outer bounds of the memory access, while this check is
+ * intended for inner use, to warn if the worst case has just happened.
+ */
void xe_device_assert_mem_access(struct xe_device *xe)
{
- XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
-}
-
-bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
-{
- bool active;
-
- if (xe_pm_read_callback_task(xe) == current)
- return true;
-
- active = xe_pm_runtime_get_if_active(xe);
- if (active) {
- int ref = atomic_inc_return(&xe->mem_access.ref);
-
- xe_assert(xe, ref != S32_MAX);
- }
-
- return active;
-}
-
-void xe_device_mem_access_get(struct xe_device *xe)
-{
- int ref;
-
- /*
- * This looks racy, but should be fine since the pm_callback_task only
- * transitions from NULL -> current (and back to NULL again), during the
- * runtime_resume() or runtime_suspend() callbacks, for which there can
- * only be a single one running for our device. We only need to prevent
- * recursively calling the runtime_get or runtime_put from those
- * callbacks, as well as preventing triggering any access_ongoing
- * asserts.
- */
- if (xe_pm_read_callback_task(xe) == current)
- return;
-
- /*
- * Since the resume here is synchronous it can be quite easy to deadlock
- * if we are not careful. Also in practice it might be quite timing
- * sensitive to ever see the 0 -> 1 transition with the callers locks
- * held, so deadlocks might exist but are hard for lockdep to ever see.
- * With this in mind, help lockdep learn about the potentially scary
- * stuff that can happen inside the runtime_resume callback by acquiring
- * a dummy lock (it doesn't protect anything and gets compiled out on
- * non-debug builds). Lockdep then only needs to see the
- * mem_access_lockdep_map -> runtime_resume callback once, and then can
- * hopefully validate all the (callers_locks) -> mem_access_lockdep_map.
- * For example if the (callers_locks) are ever grabbed in the
- * runtime_resume callback, lockdep should give us a nice splat.
- */
- lock_map_acquire(&xe_device_mem_access_lockdep_map);
- lock_map_release(&xe_device_mem_access_lockdep_map);
-
- xe_pm_runtime_get(xe);
- ref = atomic_inc_return(&xe->mem_access.ref);
-
- xe_assert(xe, ref != S32_MAX);
-
-}
-
-void xe_device_mem_access_put(struct xe_device *xe)
-{
- int ref;
-
- if (xe_pm_read_callback_task(xe) == current)
- return;
-
- ref = atomic_dec_return(&xe->mem_access.ref);
- xe_pm_runtime_put(xe);
-
- xe_assert(xe, ref >= 0);
+ xe_assert(xe, !xe_pm_runtime_suspended(xe));
}
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
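
wait_for_lmem_ready() earlier in this file is built on the wraparound-safe jiffies pattern: time_after() compares with signed arithmetic so the check stays correct when the counter wraps, and signal_pending() keeps the wait killable. Reduced to a sketch (condition_met() is a hypothetical readiness check):

unsigned long timeout = jiffies + msecs_to_jiffies(60 * 1000);

while (!condition_met()) {		/* hypothetical readiness check */
	if (signal_pending(current))
		return -EINTR;		/* let the user interrupt the wait */
	if (time_after(jiffies, timeout))
		return -EPROBE_DEFER;	/* retry probe later */
	msleep(20);
}
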
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 14be34d9f543..36d4434ebccc 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -16,10 +16,6 @@ struct xe_file;
#include "xe_force_wake.h"
#include "xe_macros.h"
-#ifdef CONFIG_LOCKDEP
-extern struct lockdep_map xe_device_mem_access_lockdep_map;
-#endif
-
static inline struct xe_device *to_xe_device(const struct drm_device *dev)
{
return container_of(dev, struct xe_device, drm);
@@ -58,7 +54,7 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
{
- if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id > XE_MAX_GT_PER_TILE))
+ if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
gt_id = 0;
return gt_id ? tile->media_gt : tile->primary_gt;
@@ -79,7 +75,7 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
if (MEDIA_VER(xe) >= 13) {
gt = xe_tile_get_gt(root_tile, gt_id);
} else {
- if (drm_WARN_ON(&xe->drm, gt_id > XE_MAX_TILES_PER_DEVICE))
+ if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
gt_id = 0;
gt = xe->tiles[gt_id].primary_gt;
@@ -137,12 +133,7 @@ static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
return &gt->mmio.fw;
}
-void xe_device_mem_access_get(struct xe_device *xe);
-bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe);
-void xe_device_mem_access_put(struct xe_device *xe);
-
void xe_device_assert_mem_access(struct xe_device *xe);
-bool xe_device_mem_access_ongoing(struct xe_device *xe);
static inline bool xe_device_in_fault_mode(struct xe_device *xe)
{
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 99113a5a2b84..21677b8cd977 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -35,7 +35,9 @@ vram_d3cold_threshold_show(struct device *dev,
if (!xe)
return -EINVAL;
+ xe_pm_runtime_get(xe);
ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
+ xe_pm_runtime_put(xe);
return ret;
}
@@ -58,7 +60,9 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);
+ xe_pm_runtime_get(xe);
ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold);
+ xe_pm_runtime_put(xe);
return ret ?: count;
}
@@ -72,18 +76,14 @@ static void xe_device_sysfs_fini(struct drm_device *drm, void *arg)
sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
}
-void xe_device_sysfs_init(struct xe_device *xe)
+int xe_device_sysfs_init(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
int ret;
ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
- if (ret) {
- drm_warn(&xe->drm, "Failed to create sysfs file\n");
- return;
- }
-
- ret = drmm_add_action_or_reset(&xe->drm, xe_device_sysfs_fini, xe);
if (ret)
- drm_warn(&xe->drm, "Failed to add sysfs fini drm action\n");
+ return ret;
+
+ return drmm_add_action_or_reset(&xe->drm, xe_device_sysfs_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.h b/drivers/gpu/drm/xe/xe_device_sysfs.h
index 38b240684bee..f9e83d8bd2c7 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.h
@@ -8,6 +8,6 @@
struct xe_device;
-void xe_device_sysfs_init(struct xe_device *xe);
+int xe_device_sysfs_init(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9785eef2e5a4..2e62450d86e1 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -321,6 +321,10 @@ struct xe_device {
struct {
/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
enum xe_sriov_mode __mode;
+
+ /** @sriov.pf: PF specific data */
+ struct xe_device_pf pf;
+
/** @sriov.wq: workqueue used by the virtualization workers */
struct workqueue_struct *wq;
} sriov;
@@ -363,6 +367,9 @@ struct xe_device {
/** @ufence_wq: user fence wait queue */
wait_queue_head_t ufence_wq;
+ /** @preempt_fence_wq: used to serialize preempt fences */
+ struct workqueue_struct *preempt_fence_wq;
+
/** @ordered_wq: used to serialize compute mode resume */
struct workqueue_struct *ordered_wq;
@@ -377,9 +384,6 @@ struct xe_device {
* triggering additional actions when they occur.
*/
struct {
- /** @mem_access.ref: ref count of memory accesses */
- atomic_t ref;
-
/**
* @mem_access.vram_userfault: Encapsulate vram_userfault
* related stuff
@@ -497,20 +501,9 @@ struct xe_device {
/* For pcode */
struct mutex sb_lock;
- /* Should be in struct intel_display */
- u32 skl_preferred_vco_freq, max_dotclk_freq, hti_state;
- u8 snps_phy_failed_calibration;
- struct drm_atomic_state *modeset_restore_state;
- struct list_head global_obj_list;
-
- union {
- /* only to allow build, not used functionally */
- u32 irq_mask;
- u32 de_irq_mask[I915_MAX_PIPES];
- };
- u32 pipestat_irq_mask[I915_MAX_PIPES];
+ /* only to allow build, not used functionally */
+ u32 irq_mask;
- bool display_irqs_enabled;
u32 enabled_irq_mask;
struct intel_uncore {
@@ -522,11 +515,7 @@ struct xe_device {
unsigned int hpll_freq;
unsigned int czclk_freq;
unsigned int fsb_freq, mem_freq, is_ddr3;
- u8 vblank_enabled;
};
- struct {
- const char *dmc_firmware_path;
- } params;
void *pxp;
#endif
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index da2627ed6ae7..68f309f5e981 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -16,6 +16,7 @@
#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_pm.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
@@ -33,7 +34,7 @@ static int xe_dma_buf_attach(struct dma_buf *dmabuf,
if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
return -EOPNOTSUPP;
- xe_device_mem_access_get(to_xe_device(obj->dev));
+ xe_pm_runtime_get(to_xe_device(obj->dev));
return 0;
}
@@ -42,7 +43,7 @@ static void xe_dma_buf_detach(struct dma_buf *dmabuf,
{
struct drm_gem_object *obj = attach->dmabuf->priv;
- xe_device_mem_access_put(to_xe_device(obj->dev));
+ xe_pm_runtime_put(to_xe_device(obj->dev));
}
static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
@@ -216,7 +217,7 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
dma_resv_lock(resv, NULL);
bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
0, /* Will require 1way or 2way for vm_bind */
- ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
+ ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
goto error;
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 87c10bd7958b..08f0b7c95901 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -78,7 +78,7 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
spin_lock(&client->bos_lock);
bo->client = xe_drm_client_get(client);
- list_add_tail_rcu(&bo->client_link, &client->bos_list);
+ list_add_tail(&bo->client_link, &client->bos_list);
spin_unlock(&client->bos_lock);
}
@@ -96,7 +96,7 @@ void xe_drm_client_remove_bo(struct xe_bo *bo)
struct xe_drm_client *client = bo->client;
spin_lock(&client->bos_lock);
- list_del_rcu(&bo->client_link);
+ list_del(&bo->client_link);
spin_unlock(&client->bos_lock);
xe_drm_client_put(client);
@@ -154,8 +154,8 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
/* Internal objects. */
spin_lock(&client->bos_lock);
- list_for_each_entry_rcu(bo, &client->bos_list, client_link) {
- if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
+ list_for_each_entry(bo, &client->bos_list, client_link) {
+ if (!kref_get_unless_zero(&bo->ttm.base.refcount))
continue;
bo_meminfo(bo, stats);
xe_bo_put(bo);
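
Dropping the _rcu list variants above is safe because every traversal already runs under bos_lock; kref_get_unless_zero() then filters out objects whose last reference is mid-release. The resulting pattern, in sketch form:

spin_lock(&client->bos_lock);
list_for_each_entry(bo, &client->bos_list, client_link) {
	if (!kref_get_unless_zero(&bo->ttm.base.refcount))
		continue;	/* final put already ran, object is going away */
	/* ... inspect bo under a temporary reference ... */
	xe_bo_put(bo);
}
spin_unlock(&client->bos_lock);
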
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 826c8b389672..97eeb973e897 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -94,48 +94,16 @@
* Unlock all
*/
+/*
+ * Add validation and rebinding to the drm_exec locking loop, since both can
+ * trigger eviction which may require sleeping dma_resv locks.
+ */
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
- struct drm_gem_object *obj;
- unsigned long index;
- int num_fences;
- int ret;
-
- ret = drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
- if (ret)
- return ret;
-
- /*
- * 1 fence slot for the final submit, and 1 more for every per-tile for
- * GPU bind and 1 extra for CPU bind. Note that there are potentially
- * many vma per object/dma-resv, however the fence slot will just be
- * re-used, since they are largely the same timeline and the seqno
- * should be in order. In the case of CPU bind there is dummy fence used
- * for all CPU binds, so no need to have a per-tile slot for that.
- */
- num_fences = 1 + 1 + vm->xe->info.tile_count;
- /*
- * We don't know upfront exactly how many fence slots we will need at
- * the start of the exec, since the TTM bo_validate above can consume
- * numerous fence slots. Also due to how the dma_resv_reserve_fences()
- * works it only ensures that at least that many fence slots are
- * available i.e if there are already 10 slots available and we reserve
- * two more, it can just noop without reserving anything. With this it
- * is quite possible that TTM steals some of the fence slots and then
- * when it comes time to do the vma binding and final exec stage we are
- * lacking enough fence slots, leading to some nasty BUG_ON() when
- * adding the fences. Hence just add our own fences here, after the
- * validate stage.
- */
- drm_exec_for_each_locked_object(&vm_exec->exec, index, obj) {
- ret = dma_resv_reserve_fences(obj->resv, num_fences);
- if (ret)
- return ret;
- }
-
- return 0;
+ /* The fence slot added here is intended for the exec sched job. */
+ return xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
}
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -152,7 +120,6 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_exec *exec = &vm_exec.exec;
u32 i, num_syncs = 0, num_ufence = 0;
struct xe_sched_job *job;
- struct dma_fence *rebind_fence;
struct xe_vm *vm;
bool write_locked, skip_retry = false;
ktime_t end = 0;
@@ -249,7 +216,7 @@ retry:
goto err_unlock_list;
}
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
xe_exec_queue_last_fence_set(q, vm, fence);
dma_fence_put(fence);
}
@@ -290,39 +257,7 @@ retry:
goto err_exec;
}
- /*
- * Rebind any invalidated userptr or evicted BOs in the VM, non-compute
- * VM mode only.
- */
- rebind_fence = xe_vm_rebind(vm, false);
- if (IS_ERR(rebind_fence)) {
- err = PTR_ERR(rebind_fence);
- goto err_put_job;
- }
-
- /*
- * We store the rebind_fence in the VM so subsequent execs don't get
- * scheduled before the rebinds of userptrs / evicted BOs is complete.
- */
- if (rebind_fence) {
- dma_fence_put(vm->rebind_fence);
- vm->rebind_fence = rebind_fence;
- }
- if (vm->rebind_fence) {
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &vm->rebind_fence->flags)) {
- dma_fence_put(vm->rebind_fence);
- vm->rebind_fence = NULL;
- } else {
- dma_fence_get(vm->rebind_fence);
- err = drm_sched_job_add_dependency(&job->drm,
- vm->rebind_fence);
- if (err)
- goto err_put_job;
- }
- }
-
- /* Wait behind munmap style rebinds */
+ /* Wait behind rebinds */
if (!xe_vm_in_lr_mode(vm)) {
err = drm_sched_job_add_resv_dependencies(&job->drm,
xe_vm_resv(vm),
@@ -359,9 +294,10 @@ retry:
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], job,
- &job->drm.s_fence->finished);
+ for (i = 0; i < num_syncs; i++) {
+ xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
+ xe_sched_job_init_user_fence(job, &syncs[i]);
+ }
if (xe_exec_queue_is_lr(q))
q->ring_ops->emit_job(job);
@@ -385,10 +321,7 @@ err_put_job:
err_exec:
drm_exec_fini(exec);
err_unlock_list:
- if (write_locked)
- up_write(&vm->lock);
- else
- up_read(&vm->lock);
+ up_read(&vm->lock);
if (err == -EAGAIN && !skip_retry)
goto retry;
err_syncs:
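
The removed comment captured the dma_resv contract that xe_vm_validate_rebind() now honors centrally: fence slots must be reserved after validation (which can itself consume slots) and before any fence is added. The contract in miniature, with obj, fence and num_fences taken as given from the surrounding code:

ret = dma_resv_reserve_fences(obj->resv, num_fences);	/* may be a no-op if enough slots exist */
if (!ret)
	dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_BOOKKEEP);
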
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 11e150f4c0c1..395de93579fa 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -31,7 +31,14 @@ enum xe_exec_queue_sched_prop {
};
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
- u64 extensions, int ext_number, bool create);
+ u64 extensions, int ext_number);
+
+static void __xe_exec_queue_free(struct xe_exec_queue *q)
+{
+ if (q->vm)
+ xe_vm_put(q->vm);
+ kfree(q);
+}
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
struct xe_vm *vm,
@@ -74,21 +81,21 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
else
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
+ if (vm)
+ q->vm = xe_vm_get(vm);
+
if (extensions) {
/*
* may set q->usm, must come before xe_lrc_init(),
* may overwrite q->sched_props, must come before q->ops->init()
*/
- err = exec_queue_user_extensions(xe, q, extensions, 0, true);
+ err = exec_queue_user_extensions(xe, q, extensions, 0);
if (err) {
- kfree(q);
+ __xe_exec_queue_free(q);
return ERR_PTR(err);
}
}
- if (vm)
- q->vm = xe_vm_get(vm);
-
if (xe_exec_queue_is_parallel(q)) {
q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
@@ -97,13 +104,6 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
return q;
}
-static void __xe_exec_queue_free(struct xe_exec_queue *q)
-{
- if (q->vm)
- xe_vm_put(q->vm);
- kfree(q);
-}
-
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(q->gt);
@@ -128,7 +128,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
* already grabbed the rpm ref outside any sensitive locks.
*/
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
- drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
+ xe_pm_runtime_get_noresume(xe);
return 0;
@@ -217,7 +217,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
for (i = 0; i < q->width; ++i)
xe_lrc_finish(q->lrc + i);
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
- xe_device_mem_access_put(gt_to_xe(q->gt));
+ xe_pm_runtime_put(gt_to_xe(q->gt));
__xe_exec_queue_free(q);
}
@@ -225,22 +225,22 @@ void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
switch (q->class) {
case XE_ENGINE_CLASS_RENDER:
- sprintf(q->name, "rcs%d", instance);
+ snprintf(q->name, sizeof(q->name), "rcs%d", instance);
break;
case XE_ENGINE_CLASS_VIDEO_DECODE:
- sprintf(q->name, "vcs%d", instance);
+ snprintf(q->name, sizeof(q->name), "vcs%d", instance);
break;
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
- sprintf(q->name, "vecs%d", instance);
+ snprintf(q->name, sizeof(q->name), "vecs%d", instance);
break;
case XE_ENGINE_CLASS_COPY:
- sprintf(q->name, "bcs%d", instance);
+ snprintf(q->name, sizeof(q->name), "bcs%d", instance);
break;
case XE_ENGINE_CLASS_COMPUTE:
- sprintf(q->name, "ccs%d", instance);
+ snprintf(q->name, sizeof(q->name), "ccs%d", instance);
break;
case XE_ENGINE_CLASS_OTHER:
- sprintf(q->name, "gsccs%d", instance);
+ snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
break;
default:
XE_WARN_ON(q->class);
@@ -268,7 +268,7 @@ xe_exec_queue_device_get_max_priority(struct xe_device *xe)
}
static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
+ u64 value)
{
if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
return -EINVAL;
@@ -276,9 +276,6 @@ static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q
if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
return -EPERM;
- if (!create)
- return q->ops->set_priority(q, value);
-
q->sched_props.priority = value;
return 0;
}
@@ -336,7 +333,7 @@ xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
}
static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
+ u64 value)
{
u32 min = 0, max = 0;
@@ -347,16 +344,13 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
- if (!create)
- return q->ops->set_timeslice(q, value);
-
q->sched_props.timeslice_us = value;
return 0;
}
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
- u64 value, bool create);
+ u64 value);
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
@@ -365,8 +359,7 @@ static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
static int exec_queue_user_ext_set_property(struct xe_device *xe,
struct xe_exec_queue *q,
- u64 extension,
- bool create)
+ u64 extension)
{
u64 __user *address = u64_to_user_ptr(extension);
struct drm_xe_ext_set_property ext;
@@ -388,21 +381,20 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
if (!exec_queue_set_property_funcs[idx])
return -EINVAL;
- return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
+ return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}
typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
- u64 extension,
- bool create);
+ u64 extension);
-static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
+static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};
#define MAX_USER_EXTENSIONS 16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
- u64 extensions, int ext_number, bool create)
+ u64 extensions, int ext_number)
{
u64 __user *address = u64_to_user_ptr(extensions);
struct drm_xe_user_extension ext;
@@ -423,13 +415,13 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
idx = array_index_nospec(ext.name,
ARRAY_SIZE(exec_queue_user_extension_funcs));
- err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
+ err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
if (XE_IOCTL_DBG(xe, err))
return err;
if (ext.next_extension)
return exec_queue_user_extensions(xe, q, ext.next_extension,
- ++ext_number, create);
+ ++ext_number);
return 0;
}
@@ -448,7 +440,7 @@ find_hw_engine(struct xe_device *xe,
{
u32 idx;
- if (eci.engine_class > ARRAY_SIZE(user_to_xe_engine_class))
+ if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
return NULL;
if (eci.gt_id >= xe->info.gt_count)
@@ -597,7 +589,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
/* The migration vm doesn't hold rpm ref */
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
@@ -606,7 +598,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
args->width, hwe, flags,
args->extensions);
- xe_device_mem_access_put(xe); /* now held by engine */
+ xe_pm_runtime_put(xe); /* now held by engine */
xe_vm_put(migrate_vm);
if (IS_ERR(new)) {
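
The sprintf() to snprintf() conversions above tie the bound to the destination array itself, so a surprisingly large instance number truncates instead of overflowing. In miniature:

char name[16];

snprintf(name, sizeof(name), "ccs%d", instance);	/* bounded and always NUL-terminated */
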
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 62b3d9d1d7cd..ee78d497d838 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -76,14 +76,12 @@ struct xe_exec_queue {
#define EXEC_QUEUE_FLAG_KERNEL BIT(1)
/* kernel engine only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT BIT(2)
-/* queue keeps running pending jobs after destroy ioctl */
-#define EXEC_QUEUE_FLAG_PERSISTENT BIT(3)
/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
-#define EXEC_QUEUE_FLAG_VM BIT(4)
+#define EXEC_QUEUE_FLAG_VM BIT(3)
/* child of VM queue for multi-tile VM jobs */
-#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
+#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(4)
/* kernel exec_queue only, set priority to highest level */
-#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(6)
+#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(5)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
@@ -148,6 +146,11 @@ struct xe_exec_queue {
const struct xe_ring_ops *ring_ops;
/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
struct drm_sched_entity *entity;
+ /**
+	 * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed.
+ * Protected by @vm's resv. Unused if @vm == NULL.
+ */
+ u64 tlb_flush_seqno;
/** @lrc: logical ring context for this exec queue */
struct xe_lrc lrc[];
};
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index ab96edb058d6..0d541f55b4fc 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -5,12 +5,14 @@
#include "xe_ggtt.h"
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>
#include <drm/drm_managed.h>
#include <drm/i915_drm.h>
#include "regs/xe_gt_regs.h"
+#include "regs/xe_gtt_defs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
@@ -19,16 +21,10 @@
#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
-#include "xe_mmio.h"
+#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wopcm.h"
-#define XELPG_GGTT_PTE_PAT0 BIT_ULL(52)
-#define XELPG_GGTT_PTE_PAT1 BIT_ULL(53)
-
-/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
-#define GUC_GGTT_TOP 0xFEE00000
-
static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
u16 pat_index)
{
@@ -200,20 +196,20 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
return drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
}
+static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);
+
static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
{
struct drm_mm_node *hole;
u64 start, end;
/* Display may have allocated inside ggtt, so be careful with clearing here */
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
mutex_lock(&ggtt->lock);
drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
xe_ggtt_clear(ggtt, start, end - start);
xe_ggtt_invalidate(ggtt);
mutex_unlock(&ggtt->lock);
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
}
int xe_ggtt_init(struct xe_ggtt *ggtt)
@@ -227,11 +223,11 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
	 * scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
- flags = XE_BO_CREATE_PINNED_BIT;
+ flags = XE_BO_FLAG_PINNED;
if (ggtt->flags & XE_GGTT_FLAGS_64K)
- flags |= XE_BO_CREATE_SYSTEM_BIT;
+ flags |= XE_BO_FLAG_SYSTEM;
else
- flags |= XE_BO_CREATE_VRAM_IF_DGFX(ggtt->tile);
+ flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);
ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
if (IS_ERR(ggtt->scratch)) {
@@ -249,51 +245,19 @@ err:
return err;
}
-#define GUC_TLB_INV_CR XE_REG(0xcee8)
-#define GUC_TLB_INV_CR_INVALIDATE REG_BIT(0)
-#define PVC_GUC_TLB_INV_DESC0 XE_REG(0xcf7c)
-#define PVC_GUC_TLB_INV_DESC0_VALID REG_BIT(0)
-#define PVC_GUC_TLB_INV_DESC1 XE_REG(0xcf80)
-#define PVC_GUC_TLB_INV_DESC1_INVALIDATE REG_BIT(6)
-
static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
{
+ int err;
+
if (!gt)
return;
- /*
- * Invalidation can happen when there's no in-flight work keeping the
- * GT awake. We need to explicitly grab forcewake to ensure the GT
- * and GuC are accessible.
- */
- xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-
- /* TODO: vfunc for GuC vs. non-GuC */
-
- if (gt->uc.guc.submission_state.enabled) {
- int seqno;
-
- seqno = xe_gt_tlb_invalidation_guc(gt);
- xe_gt_assert(gt, seqno > 0);
- if (seqno > 0)
- xe_gt_tlb_invalidation_wait(gt, seqno);
- } else if (xe_device_uc_enabled(gt_to_xe(gt))) {
- struct xe_device *xe = gt_to_xe(gt);
-
- if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
- PVC_GUC_TLB_INV_DESC1_INVALIDATE);
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
- PVC_GUC_TLB_INV_DESC0_VALID);
- } else
- xe_mmio_write32(gt, GUC_TLB_INV_CR,
- GUC_TLB_INV_CR_INVALIDATE);
- }
-
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ err = xe_gt_tlb_invalidation_ggtt(gt);
+ if (err)
+ drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
}
-void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
+static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
/* Each GT in a tile has its own TLB to cache GGTT lookups */
ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
@@ -410,7 +374,7 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- u16 cache_mode = bo->flags & XE_BO_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
u64 start = bo->ggtt_node.start;
u64 offset, pte;
@@ -419,8 +383,6 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
xe_ggtt_set_pte(ggtt, start + offset, pte);
}
-
- xe_ggtt_invalidate(ggtt);
}
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
@@ -442,14 +404,17 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (err)
return err;
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
mutex_lock(&ggtt->lock);
err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
alignment, 0, start, end, 0);
if (!err)
xe_ggtt_map_bo(ggtt, bo);
mutex_unlock(&ggtt->lock);
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+
+ if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
+ xe_ggtt_invalidate(ggtt);
+ xe_pm_runtime_put(tile_to_xe(ggtt->tile));
return err;
}
@@ -465,19 +430,21 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
}
-void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node)
+void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
+ bool invalidate)
{
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
- mutex_lock(&ggtt->lock);
+ xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
+ mutex_lock(&ggtt->lock);
xe_ggtt_clear(ggtt, node->start, node->size);
drm_mm_remove_node(node);
node->size = 0;
+ mutex_unlock(&ggtt->lock);
- xe_ggtt_invalidate(ggtt);
+ if (invalidate)
+ xe_ggtt_invalidate(ggtt);
- mutex_unlock(&ggtt->lock);
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(tile_to_xe(ggtt->tile));
}
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
@@ -488,8 +455,53 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
/* This BO is not currently in the GGTT */
xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
- xe_ggtt_remove_node(ggtt, &bo->ggtt_node);
+ xe_ggtt_remove_node(ggtt, &bo->ggtt_node,
+ bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
+}
+
+#ifdef CONFIG_PCI_IOV
+static u64 xe_encode_vfid_pte(u16 vfid)
+{
+ return FIELD_PREP(GGTT_PTE_VFID, vfid) | XE_PAGE_PRESENT;
+}
+
+static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
+{
+ u64 start = node->start;
+ u64 size = node->size;
+ u64 end = start + size - 1;
+ u64 pte = xe_encode_vfid_pte(vfid);
+
+ lockdep_assert_held(&ggtt->lock);
+
+ if (!drm_mm_node_allocated(node))
+ return;
+
+ while (start < end) {
+ xe_ggtt_set_pte(ggtt, start, pte);
+ start += XE_PAGE_SIZE;
+ }
+
+ xe_ggtt_invalidate(ggtt);
+}
+
+/**
+ * xe_ggtt_assign - assign a GGTT region to the VF
+ * @ggtt: the &xe_ggtt where the node belongs
+ * @node: the &drm_mm_node to update
+ * @vfid: the VF identifier
+ *
+ * This function is used by the PF driver to assign a GGTT region to the VF.
+ * In addition to the PTE's VFID bits 11:2, the PRESENT bit 0 is also set, as
+ * on some platforms VFs can't modify that either.
+ */
+void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
+{
+ mutex_lock(&ggtt->lock);
+ xe_ggtt_assign_locked(ggtt, node, vfid);
+ mutex_unlock(&ggtt->lock);
}
+#endif
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
{
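
xe_encode_vfid_pte() above packs the VF id into the PTE with the bitfield helpers from <linux/bitfield.h>. A sketch of the round trip, assuming GGTT_PTE_VFID covers bits 11:2 as the kernel-doc states:

u64 pte = FIELD_PREP(GGTT_PTE_VFID, vfid) | XE_PAGE_PRESENT;	/* place vfid into bits 11:2 */
u16 back = FIELD_GET(GGTT_PTE_VFID, pte);			/* extracts the original vfid */
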
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index 42705e1338e1..4a41a1762358 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -11,7 +11,6 @@
struct drm_printer;
void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte);
-void xe_ggtt_invalidate(struct xe_ggtt *ggtt);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
int xe_ggtt_init(struct xe_ggtt *ggtt);
void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix);
@@ -24,7 +23,8 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt,
struct drm_mm_node *node,
u32 size, u32 align, u32 mm_flags);
-void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node);
+void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
+ bool invalidate);
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
@@ -33,4 +33,8 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
+#ifdef CONFIG_PCI_IOV
+void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index a61994292c43..60202b903687 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -17,15 +17,18 @@
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
+#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_huc.h"
#include "xe_map.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "instructions/xe_gsc_commands.h"
#include "regs/xe_gsc_regs.h"
+#include "regs/xe_gt_regs.h"
static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
@@ -127,8 +130,8 @@ static int query_compatibility_version(struct xe_gsc *gsc)
bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
return PTR_ERR(bo);
@@ -250,9 +253,30 @@ static int gsc_upload(struct xe_gsc *gsc)
static int gsc_upload_and_init(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_tile *tile = gt_to_tile(gt);
int ret;
+ if (XE_WA(gt, 14018094691)) {
+ ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
+
+ /*
+ * If the forcewake fails we want to keep going, because the worst
+ * case outcome in failing to apply the WA is that PXP won't work,
+ * which is not fatal. We still throw a warning so the issue is
+ * seen if it happens.
+ */
+ xe_gt_WARN_ON(tile->primary_gt, ret);
+
+ xe_gt_mcr_multicast_write(tile->primary_gt,
+ EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK,
+ EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT);
+ }
+
ret = gsc_upload(gsc);
+
+ if (XE_WA(gt, 14018094691))
+ xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
+
if (ret)
return ret;
@@ -272,6 +296,44 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
return 0;
}
+static int gsc_er_complete(struct xe_gt *gt)
+{
+ u32 er_status;
+
+ if (!gsc_fw_is_loaded(gt))
+ return 0;
+
+ /*
+ * Starting on Xe2, the GSCCS engine reset is a 2-step process. When the
+ * driver or the GuC hit the GDRST register, the CS is immediately reset
+ * and a success is reported, but the GSC shim keeps resetting in the
+ * background. While the shim reset is ongoing, the CS is able to accept
+ * new context submission, but any commands that require the shim will
+ * be stalled until the reset is completed. This means that we can keep
+ * submitting to the GSCCS as long as we make sure that the preemption
+ * timeout is big enough to cover any delay introduced by the reset.
+ * When the shim reset completes, a specific CS interrupt is triggered,
+ * in response to which we need to check the GSCI_TIMER_STATUS register
+ * to see if the reset was successful or not.
+	 * Note that the GSCI_TIMER_STATUS register is not power saved/restored,
+ * so it gets reset on MC6 entry. However, a reset failure stops MC6,
+ * so in that scenario we're always guaranteed to find the correct
+ * value.
+ */
+ er_status = xe_mmio_read32(gt, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE;
+
+ if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
+ /*
+ * XXX: we should trigger an FLR here, but we don't have support
+ * for that yet.
+ */
+ xe_gt_err(gt, "GSC ER timed out!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static void gsc_work(struct work_struct *work)
{
struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
@@ -285,8 +347,14 @@ static void gsc_work(struct work_struct *work)
gsc->work_actions = 0;
spin_unlock_irq(&gsc->lock);
- xe_device_mem_access_get(xe);
- xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ xe_pm_runtime_get(xe);
+ xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC));
+
+ if (actions & GSC_ACTION_ER_COMPLETE) {
+ ret = gsc_er_complete(gt);
+ if (ret)
+ goto out;
+ }
if (actions & GSC_ACTION_FW_LOAD) {
ret = gsc_upload_and_init(gsc);
@@ -299,8 +367,26 @@ static void gsc_work(struct work_struct *work)
if (actions & GSC_ACTION_SW_PROXY)
xe_gsc_proxy_request_handler(gsc);
+out:
xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
+}
+
+void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct xe_gsc *gsc = &gt->uc.gsc;
+
+ if (unlikely(!intr_vec))
+ return;
+
+ if (intr_vec & GSC_ER_COMPLETE) {
+ spin_lock(&gsc->lock);
+ gsc->work_actions |= GSC_ACTION_ER_COMPLETE;
+ spin_unlock(&gsc->lock);
+
+ queue_work(gsc->wq, &gsc->work);
+ }
}
int xe_gsc_init(struct xe_gsc *gsc)
@@ -382,8 +468,8 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_CREATE_STOLEN_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
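
xe_gsc_hwe_irq_handler() above follows the standard IRQ-to-worker handoff: the hard IRQ path only records an action bit and queues the worker, while gsc_work() snapshots and clears the pending bits with interrupts disabled. Side by side, as a sketch:

/* hard IRQ context: record the event, defer the heavy lifting */
spin_lock(&gsc->lock);
gsc->work_actions |= GSC_ACTION_ER_COMPLETE;
spin_unlock(&gsc->lock);
queue_work(gsc->wq, &gsc->work);

/* worker context: snapshot and clear under the same lock */
spin_lock_irq(&gsc->lock);
actions = gsc->work_actions;
gsc->work_actions = 0;
spin_unlock_irq(&gsc->lock);
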
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index c6fb32e3fd79..dd16e9b8b894 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -9,12 +9,14 @@
#include "xe_gsc_types.h"
struct xe_gt;
+struct xe_hw_engine;
int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
void xe_gsc_remove(struct xe_gsc *gsc);
+void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 309ef80e3b95..1b908d238bd1 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -66,7 +66,7 @@ static inline struct xe_device *kdev_to_xe(struct device *kdev)
return dev_get_drvdata(kdev);
}
-static bool gsc_proxy_init_done(struct xe_gsc *gsc)
+bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE));
@@ -403,7 +403,6 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
void *csme;
- int err;
csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
if (!csme)
@@ -411,8 +410,8 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
kfree(csme);
return PTR_ERR(bo);
@@ -424,11 +423,7 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
gsc->proxy.to_csme = csme;
gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;
- err = drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
}
/**
@@ -528,7 +523,7 @@ int xe_gsc_proxy_start(struct xe_gsc *gsc)
if (err)
return err;
- if (!gsc_proxy_init_done(gsc)) {
+ if (!xe_gsc_proxy_init_done(gsc)) {
xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
return -EIO;
}
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
index 908f9441f093..c511ade6b863 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.h
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -11,6 +11,7 @@
struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
+bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
void xe_gsc_proxy_remove(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.c b/drivers/gpu/drm/xe/xe_gsc_submit.c
index 348994b271be..d34d03248843 100644
--- a/drivers/gpu/drm/xe/xe_gsc_submit.c
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.c
@@ -41,6 +41,21 @@ gsc_to_gt(struct xe_gsc *gsc)
}
/**
+ * xe_gsc_create_host_session_id - Create a random 64-bit host_session_id with
+ * bits 56-63 masked.
+ *
+ * Returns: a random host_session_id which can be used to send messages to the
+ * GSC CS
+ */
+u64 xe_gsc_create_host_session_id(void)
+{
+ u64 host_session_id;
+
+ get_random_bytes(&host_session_id, sizeof(u64));
+ host_session_id &= ~HOST_SESSION_CLIENT_MASK;
+ return host_session_id;
+}
+
+/**
* xe_gsc_emit_header - write the MTL GSC header in memory
* @xe: the Xe device
* @map: the iosys map to write to
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.h b/drivers/gpu/drm/xe/xe_gsc_submit.h
index 1939855031a6..1416b5745a4c 100644
--- a/drivers/gpu/drm/xe/xe_gsc_submit.h
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.h
@@ -28,4 +28,5 @@ int xe_gsc_read_out_header(struct xe_device *xe,
int xe_gsc_pkt_submit_kernel(struct xe_gsc *gsc, u64 addr_in, u32 size_in,
u64 addr_out, u32 size_out);
+u64 xe_gsc_create_host_session_id(void);
#endif
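
The new helper's masking, spelled out under the assumption that HOST_SESSION_CLIENT_MASK covers bits 63:56 as the kernel-doc above says:

u64 id;

get_random_bytes(&id, sizeof(id));
id &= ~GENMASK_ULL(63, 56);	/* assumed equivalent of ~HOST_SESSION_CLIENT_MASK */
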
diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
index 138d8cc0f19c..5926de20214c 100644
--- a/drivers/gpu/drm/xe/xe_gsc_types.h
+++ b/drivers/gpu/drm/xe/xe_gsc_types.h
@@ -47,6 +47,7 @@ struct xe_gsc {
u32 work_actions;
#define GSC_ACTION_FW_LOAD BIT(0)
#define GSC_ACTION_SW_PROXY BIT(1)
+#define GSC_ACTION_ER_COMPLETE BIT(2)
/** @proxy: sub-structure containing the SW proxy-related variables */
struct {
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index a0afe1ba6dd5..491d0413de15 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -29,6 +29,7 @@
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_pf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
@@ -43,6 +44,7 @@
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
+#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
@@ -310,6 +312,12 @@ int xe_gt_init_early(struct xe_gt *gt)
{
int err;
+ if (IS_SRIOV_PF(gt_to_xe(gt))) {
+ err = xe_gt_sriov_pf_init_early(gt);
+ if (err)
+ return err;
+ }
+
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
return err;
@@ -346,7 +354,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
{
int err, i;
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_hw_fence_irq;
@@ -359,7 +366,9 @@ static int gt_fw_domain_init(struct xe_gt *gt)
xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
}
- xe_gt_idle_sysfs_init(&gt->gtidle);
+ err = xe_gt_idle_sysfs_init(&gt->gtidle);
+ if (err)
+ goto err_force_wake;
/* Enable per hw engine IRQs */
xe_irq_enable_hwe(gt);
@@ -373,12 +382,12 @@ static int gt_fw_domain_init(struct xe_gt *gt)
err = xe_hw_engine_class_sysfs_init(gt);
if (err)
- drm_warn(&gt_to_xe(gt)->drm,
- "failed to register engines sysfs directory, err: %d\n",
- err);
+ goto err_force_wake;
/* Initialize CCS mode sysfs after early initialization of HW engines */
- xe_gt_ccs_mode_sysfs_init(gt);
+ err = xe_gt_ccs_mode_sysfs_init(gt);
+ if (err)
+ goto err_force_wake;
/*
* Stash hardware-reported version. Since this register does not exist
@@ -388,7 +397,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
XE_WARN_ON(err);
- xe_device_mem_access_put(gt_to_xe(gt));
return 0;
@@ -398,7 +406,6 @@ err_force_wake:
err_hw_fence_irq:
for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
xe_hw_fence_irq_finish(&gt->fence_irq[i]);
- xe_device_mem_access_put(gt_to_xe(gt));
return err;
}
@@ -407,7 +414,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
{
int err, i;
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
goto err_hw_fence_irq;
@@ -473,7 +479,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
XE_WARN_ON(err);
- xe_device_mem_access_put(gt_to_xe(gt));
return 0;
@@ -482,7 +487,6 @@ err_force_wake:
err_hw_fence_irq:
for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
xe_hw_fence_irq_finish(&gt->fence_irq[i]);
- xe_device_mem_access_put(gt_to_xe(gt));
return err;
}
@@ -495,7 +499,6 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
{
int err;
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto out;
@@ -518,8 +521,6 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
out_fw:
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
out:
- xe_device_mem_access_put(gt_to_xe(gt));
-
return err;
}
@@ -545,13 +546,17 @@ int xe_gt_init(struct xe_gt *gt)
xe_mocs_init_early(gt);
- xe_gt_sysfs_init(gt);
+ err = xe_gt_sysfs_init(gt);
+ if (err)
+ return err;
err = gt_fw_domain_init(gt);
if (err)
return err;
- xe_gt_freq_init(gt);
+ err = xe_gt_freq_init(gt);
+ if (err)
+ return err;
xe_force_wake_init_engines(gt, gt_to_fw(gt));
@@ -559,11 +564,7 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
- err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
}
static int do_gt_reset(struct xe_gt *gt)
@@ -643,9 +644,9 @@ static int gt_reset(struct xe_gt *gt)
goto err_fail;
}
+ xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_sanitize(gt);
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
goto err_msg;
@@ -669,8 +670,8 @@ static int gt_reset(struct xe_gt *gt)
goto err_out;
err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_device_mem_access_put(gt_to_xe(gt));
XE_WARN_ON(err);
+ xe_pm_runtime_put(gt_to_xe(gt));
xe_gt_info(gt, "reset done\n");
@@ -680,7 +681,7 @@ err_out:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
XE_WARN_ON(xe_uc_start(&gt->uc));
- xe_device_mem_access_put(gt_to_xe(gt));
+ xe_pm_runtime_put(gt_to_xe(gt));
err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
@@ -710,22 +711,20 @@ void xe_gt_reset_async(struct xe_gt *gt)
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
- xe_device_mem_access_get(gt_to_xe(gt));
XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
xe_uc_stop_prepare(&gt->uc);
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
- xe_device_mem_access_put(gt_to_xe(gt));
}
int xe_gt_suspend(struct xe_gt *gt)
{
int err;
+ xe_gt_dbg(gt, "suspending\n");
xe_gt_sanitize(gt);
- xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
goto err_msg;
@@ -735,15 +734,13 @@ int xe_gt_suspend(struct xe_gt *gt)
goto err_force_wake;
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
- xe_device_mem_access_put(gt_to_xe(gt));
- xe_gt_info(gt, "suspended\n");
+ xe_gt_dbg(gt, "suspended\n");
return 0;
err_force_wake:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
- xe_device_mem_access_put(gt_to_xe(gt));
xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
return err;
@@ -753,7 +750,7 @@ int xe_gt_resume(struct xe_gt *gt)
{
int err;
- xe_device_mem_access_get(gt_to_xe(gt));
+ xe_gt_dbg(gt, "resuming\n");
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
goto err_msg;
@@ -763,15 +760,13 @@ int xe_gt_resume(struct xe_gt *gt)
goto err_force_wake;
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
- xe_device_mem_access_put(gt_to_xe(gt));
- xe_gt_info(gt, "resumed\n");
+ xe_gt_dbg(gt, "resumed\n");
return 0;
err_force_wake:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
- xe_device_mem_access_put(gt_to_xe(gt));
xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
return err;
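
The xe_gt.c hunks above replace the old xe_device_mem_access_get()/put() tracking with explicit runtime-PM references. A minimal sketch of the resulting bracket pattern, where do_hw_access() is a hypothetical stand-in for any register access (not part of the patch):

	int err;

	xe_pm_runtime_get(xe);		/* wake the device and hold a PM reference */
	err = do_hw_access(gt);		/* hypothetical hardware access */
	xe_pm_runtime_put(xe);		/* drop the reference so the device may suspend */
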
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index 529fc286cd06..396aeb5b9924 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -167,25 +167,20 @@ static void xe_gt_ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
* and it is expected that there are no open drm clients while doing so.
* The number of available compute slices is exposed to user through a per-gt
* 'num_cslices' sysfs interface.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
if (!xe_gt_ccs_mode_enabled(gt))
- return;
+ return 0;
err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
- if (err) {
- drm_warn(&xe->drm, "Sysfs creation for ccs_mode failed err: %d\n", err);
- return;
- }
+ if (err)
+ return err;
- err = drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
- if (err) {
- sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- }
+ return drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
}
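
Several init paths above now end with a bare return of drmm_add_action_or_reset(). That helper registers a cleanup action to run at DRM device release and, if registration itself fails, invokes the action immediately and returns a negative error, so no manual rollback is needed at the call site. Illustrative shape of the pattern (drm, kobj, attrs and fini are placeholder names):

	err = sysfs_create_files(kobj, attrs);
	if (err)
		return err;

	/* on failure, fini runs right away and the error propagates */
	return drmm_add_action_or_reset(drm, fini, kobj);
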
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
index f39975aaaab0..f8779852cf0d 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
@@ -12,7 +12,7 @@
#include "xe_platform_types.h"
void xe_gt_apply_ccs_mode(struct xe_gt *gt);
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
static inline bool xe_gt_ccs_mode_enabled(const struct xe_gt *gt)
{
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
index 937054e31d72..c7bca20f6b65 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.c
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -78,8 +78,3 @@ int xe_gt_clock_init(struct xe_gt *gt)
gt->info.reference_clock = freq;
return 0;
}
-
-u64 xe_gt_clock_cycles_to_ns(const struct xe_gt *gt, u64 count)
-{
- return DIV_ROUND_CLOSEST_ULL(count * NSEC_PER_SEC, gt->info.reference_clock);
-}
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.h b/drivers/gpu/drm/xe/xe_gt_clock.h
index aa162722f859..44fa0371b973 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.h
+++ b/drivers/gpu/drm/xe/xe_gt_clock.h
@@ -11,5 +11,5 @@
struct xe_gt;
int xe_gt_clock_init(struct xe_gt *gt);
-u64 xe_gt_clock_cycles_to_ns(const struct xe_gt *gt, u64 count);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index c4b67cf09f8f..8cf0b2625efc 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -5,6 +5,8 @@
#include "xe_gt_debugfs.h"
+#include <linux/debugfs.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
@@ -18,193 +20,246 @@
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_pat.h"
+#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
-static struct xe_gt *node_to_gt(struct drm_info_node *node)
+/**
+ * xe_gt_debugfs_simple_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_gt specific.
+ *
+ * It is assumed that those debugfs files will be created in a directory
+ * entry whose struct dentry d_inode->i_private points to the &xe_gt.
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function code that will actually print a file content::
+ *
+ * int (*print)(struct xe_gt *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int foo(struct xe_gt *gt, struct drm_printer *p)
+ * {
+ * drm_printf(p, "GT%u\n", gt->info.id);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list bar[] = {
+ * { .name = "foo", .show = xe_gt_debugfs_simple_show, .data = foo },
+ * };
+ *
+ * dir = debugfs_create_dir("gt", parent);
+ * dir->d_inode->i_private = gt;
+ * drm_debugfs_create_files(bar, ARRAY_SIZE(bar), dir, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_debugfs_simple_show(struct seq_file *m, void *data)
{
- return node->info_ent->data;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct xe_gt *gt = parent->d_inode->i_private;
+ int (*print)(struct xe_gt *, struct drm_printer *) = node->info_ent->data;
+
+ if (WARN_ON(!print))
+ return -EINVAL;
+
+ return print(gt, &p);
}
-static int hw_engines(struct seq_file *m, void *data)
+static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
struct xe_device *xe = gt_to_xe(gt);
- struct drm_printer p = drm_seq_file_printer(m);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
int err;
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err) {
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return err;
}
for_each_hw_engine(hwe, gt, id)
- xe_hw_engine_print(hwe, &p);
+ xe_hw_engine_print(hwe, p);
err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
if (err)
return err;
return 0;
}
-static int force_reset(struct seq_file *m, void *data)
+static int force_reset(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
-
+ xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_reset_async(gt);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int sa_info(struct seq_file *m, void *data)
+static int sa_info(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_tile *tile = gt_to_tile(node_to_gt(m->private));
- struct drm_printer p = drm_seq_file_printer(m);
+ struct xe_tile *tile = gt_to_tile(gt);
- drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, &p,
+ xe_pm_runtime_get(gt_to_xe(gt));
+ drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
tile->mem.kernel_bb_pool->gpu_addr);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int topology(struct seq_file *m, void *data)
+static int topology(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_gt_topology_dump(gt, &p);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_gt_topology_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int steering(struct seq_file *m, void *data)
+static int steering(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_gt_mcr_steering_dump(gt, &p);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_gt_mcr_steering_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int ggtt(struct seq_file *m, void *data)
+static int ggtt(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
+ int ret;
+
+ xe_pm_runtime_get(gt_to_xe(gt));
+ ret = xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
- return xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, &p);
+ return ret;
}
-static int register_save_restore(struct seq_file *m, void *data)
+static int register_save_restore(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
- xe_reg_sr_dump(&gt->reg_sr, &p);
- drm_printf(&p, "\n");
+ xe_pm_runtime_get(gt_to_xe(gt));
- drm_printf(&p, "Engine\n");
+ xe_reg_sr_dump(&gt->reg_sr, p);
+ drm_printf(p, "\n");
+
+ drm_printf(p, "Engine\n");
for_each_hw_engine(hwe, gt, id)
- xe_reg_sr_dump(&hwe->reg_sr, &p);
- drm_printf(&p, "\n");
+ xe_reg_sr_dump(&hwe->reg_sr, p);
+ drm_printf(p, "\n");
- drm_printf(&p, "LRC\n");
+ drm_printf(p, "LRC\n");
for_each_hw_engine(hwe, gt, id)
- xe_reg_sr_dump(&hwe->reg_lrc, &p);
- drm_printf(&p, "\n");
+ xe_reg_sr_dump(&hwe->reg_lrc, p);
+ drm_printf(p, "\n");
- drm_printf(&p, "Whitelist\n");
+ drm_printf(p, "Whitelist\n");
for_each_hw_engine(hwe, gt, id)
- xe_reg_whitelist_dump(&hwe->reg_whitelist, &p);
+ xe_reg_whitelist_dump(&hwe->reg_whitelist, p);
+
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int workarounds(struct seq_file *m, void *data)
+static int workarounds(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_wa_dump(gt, &p);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_wa_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int pat(struct seq_file *m, void *data)
+static int pat(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt *gt = node_to_gt(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pat_dump(gt, &p);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_pat_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
return 0;
}
-static int rcs_default_lrc(struct seq_file *m, void *data)
+static int rcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_RENDER);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_RENDER);
return 0;
}
-static int ccs_default_lrc(struct seq_file *m, void *data)
+static int ccs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_COMPUTE);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_COMPUTE);
return 0;
}
-static int bcs_default_lrc(struct seq_file *m, void *data)
+static int bcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_COPY);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_COPY);
return 0;
}
-static int vcs_default_lrc(struct seq_file *m, void *data)
+static int vcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_DECODE);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_VIDEO_DECODE);
return 0;
}
-static int vecs_default_lrc(struct seq_file *m, void *data)
+static int vecs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
+ xe_pm_runtime_put(gt_to_xe(gt));
- xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_VIDEO_ENHANCE);
return 0;
}
static const struct drm_info_list debugfs_list[] = {
- {"hw_engines", hw_engines, 0},
- {"force_reset", force_reset, 0},
- {"sa_info", sa_info, 0},
- {"topology", topology, 0},
- {"steering", steering, 0},
- {"ggtt", ggtt, 0},
- {"register-save-restore", register_save_restore, 0},
- {"workarounds", workarounds, 0},
- {"pat", pat, 0},
- {"default_lrc_rcs", rcs_default_lrc},
- {"default_lrc_ccs", ccs_default_lrc},
- {"default_lrc_bcs", bcs_default_lrc},
- {"default_lrc_vcs", vcs_default_lrc},
- {"default_lrc_vecs", vecs_default_lrc},
+ {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+ {"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
+ {"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
+ {"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
+ {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
+ {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
+ {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
+ {"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
+ {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
+ {"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
+ {"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc},
+ {"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
+ {"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc},
+ {"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc},
};
void xe_gt_debugfs_register(struct xe_gt *gt)
@@ -212,13 +267,11 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
struct drm_minor *minor = gt_to_xe(gt)->drm.primary;
struct dentry *root;
- struct drm_info_list *local;
char name[8];
- int i;
xe_gt_assert(gt, minor->debugfs_root);
- sprintf(name, "gt%d", gt->info.id);
+ snprintf(name, sizeof(name), "gt%d", gt->info.id);
root = debugfs_create_dir(name, minor->debugfs_root);
if (IS_ERR(root)) {
drm_warn(&xe->drm, "Create GT directory failed");
@@ -226,22 +279,13 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
}
/*
- * Allocate local copy as we need to pass in the GT to the debugfs
- * entry and drm_debugfs_create_files just references the drm_info_list
- * passed in (e.g. can't define this on the stack).
+ * Store the xe_gt pointer as private data of the gt/ directory node
+ * so that other GT-specific attributes under that directory can
+ * retrieve it through their parent node's private data.
*/
-#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
- local = drmm_kmalloc(&xe->drm, DEBUGFS_SIZE, GFP_KERNEL);
- if (!local)
- return;
-
- memcpy(local, debugfs_list, DEBUGFS_SIZE);
-#undef DEBUGFS_SIZE
-
- for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
- local[i].data = gt;
+ root->d_inode->i_private = gt;
- drm_debugfs_create_files(local,
+ drm_debugfs_create_files(debugfs_list,
ARRAY_SIZE(debugfs_list),
root, minor);
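
With the GT pointer stashed in the directory's i_private, the driver can pass the static debugfs_list directly instead of a drmm_kmalloc()'ed per-GT copy. Adding another per-GT file now takes two steps, sketched below with a hypothetical foo_info() callback:

	static int foo_info(struct xe_gt *gt, struct drm_printer *p)
	{
		drm_printf(p, "gt%u: foo\n", gt->info.id);
		return 0;
	}

	/* appended to debugfs_list[] */
	{ "foo", .show = xe_gt_debugfs_simple_show, .data = foo_info },
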
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.h b/drivers/gpu/drm/xe/xe_gt_debugfs.h
index 5a329f118a57..05a6cc93c78c 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.h
@@ -6,8 +6,10 @@
#ifndef _XE_GT_DEBUGFS_H_
#define _XE_GT_DEBUGFS_H_
+struct seq_file;
struct xe_gt;
void xe_gt_debugfs_register(struct xe_gt *gt);
+int xe_gt_debugfs_simple_show(struct seq_file *m, void *data);
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index e5b0f4ecdbe8..855de40e40ea 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -15,6 +15,7 @@
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle_sysfs.h"
#include "xe_guc_pc.h"
+#include "xe_pm.h"
/**
* DOC: Xe GT Frequency Management
@@ -49,12 +50,23 @@ dev_to_pc(struct device *dev)
return &kobj_to_gt(dev->kobj.parent)->uc.guc.pc;
}
+static struct xe_device *
+dev_to_xe(struct device *dev)
+{
+ return gt_to_xe(kobj_to_gt(dev->kobj.parent));
+}
+
static ssize_t act_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
- return sysfs_emit(buf, "%d\n", xe_guc_pc_get_act_freq(pc));
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_act_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return sysfs_emit(buf, "%d\n", freq);
}
static DEVICE_ATTR_RO(act_freq);
@@ -65,7 +77,9 @@ static ssize_t cur_freq_show(struct device *dev,
u32 freq;
ssize_t ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_get_cur_freq(pc, &freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -77,8 +91,13 @@ static ssize_t rp0_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
- return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rp0_freq(pc));
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rp0_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return sysfs_emit(buf, "%d\n", freq);
}
static DEVICE_ATTR_RO(rp0_freq);
@@ -86,8 +105,13 @@ static ssize_t rpe_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rpe_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
- return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpe_freq(pc));
+ return sysfs_emit(buf, "%d\n", freq);
}
static DEVICE_ATTR_RO(rpe_freq);
@@ -107,7 +131,9 @@ static ssize_t min_freq_show(struct device *dev,
u32 freq;
ssize_t ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_get_min_freq(pc, &freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -125,7 +151,9 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_set_min_freq(pc, freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -140,7 +168,9 @@ static ssize_t max_freq_show(struct device *dev,
u32 freq;
ssize_t ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_get_max_freq(pc, &freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -158,7 +188,9 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_set_max_freq(pc, freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -190,33 +222,28 @@ static void freq_fini(struct drm_device *drm, void *arg)
* @gt: Xe GT object
*
* It needs to be initialized after GT Sysfs and GuC PC components are ready.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
-void xe_gt_freq_init(struct xe_gt *gt)
+int xe_gt_freq_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
if (xe->info.skip_guc_pc)
- return;
+ return 0;
gt->freq = kobject_create_and_add("freq0", gt->sysfs);
- if (!gt->freq) {
- drm_warn(&xe->drm, "failed to add freq0 directory to %s\n",
- kobject_name(gt->sysfs));
- return;
- }
+ if (!gt->freq)
+ return -ENOMEM;
err = drmm_add_action_or_reset(&xe->drm, freq_fini, gt->freq);
- if (err) {
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- return;
- }
+ if (err)
+ return err;
err = sysfs_create_files(gt->freq, freq_attrs);
if (err)
- drm_warn(&xe->drm, "failed to add freq attrs to %s, err: %d\n",
- kobject_name(gt->freq), err);
+ return err;
- xe_gt_throttle_sysfs_init(gt);
+ return xe_gt_throttle_sysfs_init(gt);
}
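
The new dev_to_xe() helper relies on the sysfs layout that xe_gt_freq_init() builds: the freq0 kobject is created under gt->sysfs, so walking one parent up from the attribute's device kobject lands on the GT. Sketch of the assumed hierarchy (paths illustrative):

	/*
	 *   .../tile0/gt0        <- gt->sysfs, resolved by kobj_to_gt()
	 *   .../tile0/gt0/freq0  <- gt->freq, where the attributes live
	 */
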
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.h b/drivers/gpu/drm/xe/xe_gt_freq.h
index f3fe3c90491a..b7fddbe7b9b6 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.h
+++ b/drivers/gpu/drm/xe/xe_gt_freq.h
@@ -8,6 +8,6 @@
struct xe_gt;
-void xe_gt_freq_init(struct xe_gt *gt);
+int xe_gt_freq_init(struct xe_gt *gt);
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 9fcae65b6469..8fc0f3f6ecc5 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -12,6 +12,7 @@
#include "xe_guc_pc.h"
#include "regs/xe_gt_regs.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
/**
* DOC: Xe GT Idle
@@ -40,6 +41,15 @@ static struct xe_guc_pc *gtidle_to_pc(struct xe_gt_idle *gtidle)
return &gtidle_to_gt(gtidle)->uc.guc.pc;
}
+static struct xe_device *
+pc_to_xe(struct xe_guc_pc *pc)
+{
+ struct xe_guc *guc = container_of(pc, struct xe_guc, pc);
+ struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
+
+ return gt_to_xe(gt);
+}
+
static const char *gt_idle_state_to_string(enum xe_gt_idle_state state)
{
switch (state) {
@@ -86,8 +96,14 @@ static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
+ struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
+ ssize_t ret;
+
+ xe_pm_runtime_get(pc_to_xe(pc));
+ ret = sysfs_emit(buff, "%s\n", gtidle->name);
+ xe_pm_runtime_put(pc_to_xe(pc));
- return sysfs_emit(buff, "%s\n", gtidle->name);
+ return ret;
}
static DEVICE_ATTR_RO(name);
@@ -98,7 +114,9 @@ static ssize_t idle_status_show(struct device *dev,
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
enum xe_gt_idle_state state;
+ xe_pm_runtime_get(pc_to_xe(pc));
state = gtidle->idle_status(pc);
+ xe_pm_runtime_put(pc_to_xe(pc));
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
@@ -111,7 +129,10 @@ static ssize_t idle_residency_ms_show(struct device *dev,
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
u64 residency;
+ xe_pm_runtime_get(pc_to_xe(pc));
residency = gtidle->idle_residency(pc);
+ xe_pm_runtime_put(pc_to_xe(pc));
+
return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
}
static DEVICE_ATTR_RO(idle_residency_ms);
@@ -131,7 +152,7 @@ static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg)
kobject_put(kobj);
}
-void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
+int xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
{
struct xe_gt *gt = gtidle_to_gt(gtidle);
struct xe_device *xe = gt_to_xe(gt);
@@ -139,16 +160,14 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
int err;
kobj = kobject_create_and_add("gtidle", gt->sysfs);
- if (!kobj) {
- drm_warn(&xe->drm, "%s failed, err: %d\n", __func__, -ENOMEM);
- return;
- }
+ if (!kobj)
+ return -ENOMEM;
if (xe_gt_is_media_type(gt)) {
- sprintf(gtidle->name, "gt%d-mc", gt->info.id);
+ snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
} else {
- sprintf(gtidle->name, "gt%d-rc", gt->info.id);
+ snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-rc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_rc6_residency;
}
@@ -159,14 +178,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
err = sysfs_create_files(kobj, gt_idle_attrs);
if (err) {
kobject_put(kobj);
- drm_warn(&xe->drm, "failed to register gtidle sysfs, err: %d\n", err);
- return;
+ return err;
}
- err = drmm_add_action_or_reset(&xe->drm, gt_idle_sysfs_fini, kobj);
- if (err)
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return drmm_add_action_or_reset(&xe->drm, gt_idle_sysfs_fini, kobj);
}
void xe_gt_idle_enable_c6(struct xe_gt *gt)
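
pc_to_xe() above recovers the device from a struct xe_guc_pc by walking container_of() twice through the enclosing structures. Annotated sketch of the chain, with field names taken from the expressions in the patch:

	/*
	 * pc is embedded as gt->uc.guc.pc, so:
	 *   container_of(pc, struct xe_guc, pc)      -> the xe_guc
	 *   container_of(guc, struct xe_gt, uc.guc)  -> the xe_gt
	 *   gt_to_xe(gt)                             -> the xe_device
	 */
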
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
index 69280fd16b03..75bd99659b1b 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -10,7 +10,7 @@
struct xe_gt;
-void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle);
+int xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle);
void xe_gt_idle_enable_c6(struct xe_gt *gt);
void xe_gt_idle_disable_c6(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index a7ab9ba645f9..577bd7043740 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -6,6 +6,7 @@
#include "xe_gt_mcr.h"
#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_gt_types.h"
@@ -294,14 +295,40 @@ static void init_steering_mslice(struct xe_gt *gt)
gt->steering[LNCF].instance_target = 0; /* unused */
}
-static void init_steering_dss(struct xe_gt *gt)
+static unsigned int dss_per_group(struct xe_gt *gt)
{
- unsigned int dss = min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
- xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0));
- unsigned int dss_per_grp = gt_to_xe(gt)->info.platform == XE_PVC ? 8 : 4;
+ if (gt_to_xe(gt)->info.platform == XE_PVC)
+ return 8;
+ else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+ return 4;
+ else
+ return 6;
+}
- gt->steering[DSS].group_target = dss / dss_per_grp;
- gt->steering[DSS].instance_target = dss % dss_per_grp;
+/**
+ * xe_gt_mcr_get_dss_steering - Get the group/instance steering for a DSS
+ * @gt: GT structure
+ * @dss: DSS ID to obtain steering for
+ * @group: pointer to storage for steering group ID
+ * @instance: pointer to storage for steering instance ID
+ */
+void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
+{
+ int dss_per_grp = dss_per_group(gt);
+
+ xe_gt_assert(gt, dss < XE_MAX_DSS_FUSE_BITS);
+
+ *group = dss / dss_per_grp;
+ *instance = dss % dss_per_grp;
+}
+
+static void init_steering_dss(struct xe_gt *gt)
+{
+ xe_gt_mcr_get_dss_steering(gt,
+ min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
+ xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0)),
+ &gt->steering[DSS].group_target,
+ &gt->steering[DSS].instance_target);
}
static void init_steering_oaddrm(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index 27ca1bc880a0..a7f4ab1aa584 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -7,6 +7,7 @@
#define _XE_GT_MCR_H_
#include "regs/xe_reg_defs.h"
+#include "xe_gt_topology.h"
struct drm_printer;
struct xe_gt;
@@ -25,5 +26,18 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
u32 value);
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
+
+/*
+ * Loop over each DSS and determine the group and instance IDs that
+ * should be used to steer MCR accesses toward this DSS.
+ * @dss: DSS ID to obtain steering for
+ * @gt: GT structure
+ * @group: steering group ID, data type: u16
+ * @instance: steering instance ID, data type: u16
+ */
+#define for_each_dss_steering(dss, gt, group, instance) \
+ for_each_dss((dss), (gt)) \
+ for_each_if((xe_gt_mcr_get_dss_steering((gt), (dss), &(group), &(instance)), true))
#endif /* _XE_GT_MCR_H_ */
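
A hedged caller-side sketch of the new for_each_dss_steering() iterator, printing each DSS with its steering pair (the loop variables must match the types the macro documents):

	u16 group, instance;
	unsigned int dss;

	for_each_dss_steering(dss, gt, group, instance)
		xe_gt_dbg(gt, "DSS %u steered via %u/%u\n", dss, group, instance);
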
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 241c294270d9..fa9e9853c53b 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -100,10 +100,9 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
- unsigned int num_shared = 2; /* slots for bind + move */
int err;
- err = xe_vm_prepare_vma(exec, vma, num_shared);
+ err = xe_vm_lock_vma(exec, vma);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
new file mode 100644
index 000000000000..791dcdd767e2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_gt_sriov_pf.h"
+#include "xe_gt_sriov_pf_helpers.h"
+
+/*
+ * VF's metadata is maintained in the flexible array where:
+ * - entry [0] contains metadata for the PF (only if applicable),
+ * - entries [1..n] contain metadata for VF1..VFn::
+ *
+ * <--------------------------- 1 + total_vfs ----------->
+ * +-------+-------+-------+-----------------------+-------+
+ * | 0 | 1 | 2 | | n |
+ * +-------+-------+-------+-----------------------+-------+
+ * | PF | VF1 | VF2 | ... ... | VFn |
+ * +-------+-------+-------+-----------------------+-------+
+ */
+static int pf_alloc_metadata(struct xe_gt *gt)
+{
+ unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt);
+
+ gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,
+ sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);
+ if (!gt->sriov.pf.vfs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
+ * @gt: the &xe_gt to initialize
+ *
+ * Early initialization of the PF data.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
+{
+ int err;
+
+ err = pf_alloc_metadata(gt);
+ if (err)
+ return err;
+
+ return 0;
+}
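
Given the layout in the diagram above, a VF's metadata is reached by plain indexing, with entry [0] reserved for the PF itself; the config lookup later in this series does exactly that:

	/* vfid == 0 selects the PF entry, vfid 1..n the VFs */
	config = &gt->sriov.pf.vfs[vfid].config;
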
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
new file mode 100644
index 000000000000..05142ffc4319
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_H_
+#define _XE_GT_SRIOV_PF_H_
+
+struct xe_gt;
+
+#ifdef CONFIG_PCI_IOV
+int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+#else
+static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
+{
+ return 0;
+}
+#endif
+
+#endif
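
The new xe_gt_sriov_pf_config.c below pushes VF provisioning to the GuC as KLV (key-length-value) records packed into dwords. A sketch of how one 64-bit value is laid out, mirroring pf_push_vf_cfg_u64() (values illustrative):

	/*
	 * dword 0: FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2)
	 * dword 1: lower_32_bits(value)
	 * dword 2: upper_32_bits(value)
	 *
	 * records are concatenated back to back; xe_guc_klv_count()
	 * recovers the record count by walking the LEN fields.
	 */
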
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
new file mode 100644
index 000000000000..79116ad58620
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -0,0 +1,1977 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <linux/string_choices.h>
+#include <linux/wordpart.h>
+
+#include "abi/guc_actions_sriov_abi.h"
+#include "abi/guc_klvs_abi.h"
+
+#include "regs/xe_guc_regs.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_policy.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_db_mgr.h"
+#include "xe_guc_fwif.h"
+#include "xe_guc_id_mgr.h"
+#include "xe_guc_klv_helpers.h"
+#include "xe_guc_submit.h"
+#include "xe_lmtt.h"
+#include "xe_map.h"
+#include "xe_sriov.h"
+#include "xe_ttm_vram_mgr.h"
+#include "xe_wopcm.h"
+
+/*
+ * Return: number of KLVs that were successfully parsed and saved,
+ * negative error code on failure.
+ */
+static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
+ u64 addr, u32 size)
+{
+ u32 request[] = {
+ GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
+ vfid,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ size,
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
+}
+
+/*
+ * Return: 0 on success, negative error code on failure.
+ */
+static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
+{
+ struct xe_guc *guc = &gt->uc.guc;
+ int ret;
+
+ ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);
+
+ return ret <= 0 ? ret : -EPROTO;
+}
+
+/*
+ * Return: number of KLVs that were successfully parsed and saved,
+ * negative error code on failure.
+ */
+static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
+{
+ const u32 bytes = num_dwords * sizeof(u32);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_bo *bo;
+ int ret;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(bytes, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
+
+ ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
+
+ xe_bo_unpin_map_no_vm(bo);
+
+ return ret;
+}
+
+/*
+ * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
+ * negative error code on failure.
+ */
+static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+ const u32 *klvs, u32 num_dwords)
+{
+ int ret;
+
+ xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+ ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
+
+ if (ret != num_klvs) {
+ int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
+ struct drm_printer p = xe_gt_info_printer(gt);
+ char name[8];
+
+ xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
+ xe_sriov_function_name(vfid, name, sizeof(name)),
+ num_klvs, str_plural(num_klvs), ERR_PTR(err));
+ xe_guc_klv_print(klvs, num_dwords, &p);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ struct drm_printer p = xe_gt_info_printer(gt);
+
+ xe_guc_klv_print(klvs, num_dwords, &p);
+ }
+
+ return 0;
+}
+
+static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
+{
+ u32 klv[] = {
+ FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
+ value,
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
+}
+
+static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
+{
+ u32 klv[] = {
+ FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
+ lower_32_bits(value),
+ upper_32_bits(value),
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
+}
+
+static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
+{
+ u32 klvs[] = {
+ PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
+ lower_32_bits(start),
+ upper_32_bits(start),
+ PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
+ lower_32_bits(size),
+ upper_32_bits(size),
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
+}
+
+static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
+{
+ u32 klvs[] = {
+ PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
+ begin,
+ PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
+ num,
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
+}
+
+static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
+{
+ u32 klvs[] = {
+ PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
+ begin,
+ PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
+ num,
+ };
+
+ return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
+}
+
+static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum)
+{
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, exec_quantum);
+}
+
+static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 preempt_timeout)
+{
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, preempt_timeout);
+}
+
+static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
+}
+
+static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return &gt->sriov.pf.vfs[vfid].config;
+}
+
+/* Return: number of configuration dwords written */
+static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
+{
+ u32 n = 0;
+
+ if (drm_mm_node_allocated(&config->ggtt_region)) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
+ cfg[n++] = lower_32_bits(config->ggtt_region.start);
+ cfg[n++] = upper_32_bits(config->ggtt_region.start);
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
+ cfg[n++] = lower_32_bits(config->ggtt_region.size);
+ cfg[n++] = upper_32_bits(config->ggtt_region.size);
+ }
+
+ return n;
+}
+
+/* Return: number of configuration dwords written */
+static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
+{
+ u32 n = 0;
+
+ n += encode_config_ggtt(cfg, config);
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
+ cfg[n++] = config->begin_ctx;
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
+ cfg[n++] = config->num_ctxs;
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
+ cfg[n++] = config->begin_db;
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
+ cfg[n++] = config->num_dbs;
+
+ if (config->lmem_obj) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
+ cfg[n++] = lower_32_bits(config->lmem_obj->size);
+ cfg[n++] = upper_32_bits(config->lmem_obj->size);
+ }
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
+ cfg[n++] = config->exec_quantum;
+
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
+ cfg[n++] = config->preempt_timeout;
+
+ return n;
+}
+
+static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ u32 max_cfg_dwords = SZ_4K / sizeof(u32);
+ u32 num_dwords;
+ int num_klvs;
+ u32 *cfg;
+ int err;
+
+ cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ num_dwords = encode_config(cfg, config);
+ xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
+
+ if (xe_gt_is_media_type(gt)) {
+ struct xe_gt *primary = gt->tile->primary_gt;
+ struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
+
+ /* media-GT will never include a GGTT config */
+ xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));
+
+ /* the GGTT config must be taken from the primary-GT instead */
+ num_dwords += encode_config_ggtt(cfg + num_dwords, other);
+ }
+ xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
+
+ num_klvs = xe_guc_klv_count(cfg, num_dwords);
+ err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
+
+ kfree(cfg);
+ return err;
+}
+
+static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
+}
+
+static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
+{
+ /* XXX: preliminary */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
+ pf_get_ggtt_alignment(gt) : SZ_64M;
+}
+
+static u64 pf_get_spare_ggtt(struct xe_gt *gt)
+{
+ u64 spare;
+
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = gt->sriov.pf.spare.ggtt_size;
+ spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));
+
+ return spare;
+}
+
+static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
+{
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (size && size < pf_get_min_spare_ggtt(gt))
+ return -EINVAL;
+
+ size = round_up(size, pf_get_ggtt_alignment(gt));
+ gt->sriov.pf.spare.ggtt_size = size;
+
+ return 0;
+}
+
+static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
+{
+ int err, err2 = 0;
+
+ err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);
+
+ if (tile->media_gt && !err)
+ err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);
+
+ return err ?: err2;
+}
+
+static void pf_release_ggtt(struct xe_tile *tile, struct drm_mm_node *node)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ if (drm_mm_node_allocated(node)) {
+ /*
+ * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
+ * is redundant, as PTE will be implicitly re-assigned to PF by
+ * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
+ */
+ xe_ggtt_remove_node(ggtt, node, false);
+ }
+}
+
+static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ pf_release_ggtt(gt_to_tile(gt), &config->ggtt_region);
+}
+
+static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct drm_mm_node *node = &config->ggtt_region;
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ int err;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ size = round_up(size, alignment);
+
+ if (drm_mm_node_allocated(node)) {
+ err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
+ if (unlikely(err))
+ return err;
+
+ pf_release_ggtt(tile, node);
+ }
+ xe_gt_assert(gt, !drm_mm_node_allocated(node));
+
+ if (!size)
+ return 0;
+
+ err = xe_ggtt_insert_special_node(ggtt, node, size, alignment);
+ if (unlikely(err))
+ return err;
+
+ xe_ggtt_assign(ggtt, node, vfid);
+ xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
+ vfid, node->start, node->start + node->size - 1);
+
+ err = pf_distribute_config_ggtt(gt->tile, vfid, node->start, node->size);
+ if (unlikely(err))
+ return err;
+
+ return 0;
+}
+
+static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct drm_mm_node *node = &config->ggtt_region;
+
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ return drm_mm_node_allocated(node) ? node->size : 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: size of the VF's assigned (or PF's spare) GGTT address space.
+ */
+u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
+{
+ u64 size;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
+ else
+ size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return size;
+}
+
+static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
+ u64 actual, const char *what, int err)
+{
+ char size[10];
+ char name[8];
+
+ xe_sriov_function_name(vfid, name, sizeof(name));
+
+ if (unlikely(err)) {
+ string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
+ xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
+ name, value, size, what, ERR_PTR(err));
+ string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
+ xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
+ name, actual, size, what);
+ return err;
+ }
+
+ /* the actual value may have changed during provisioning */
+ string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
+ xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
+ name, actual, size, what);
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: the VF identifier
+ * @size: requested GGTT size
+ *
+ * If &vfid represents PF, then the function will change PF's spare GGTT config.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ int err;
+
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ err = pf_provision_vf_ggtt(gt, vfid, size);
+ else
+ err = pf_set_spare_ggtt(gt, size);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u64_done(gt, vfid, size,
+ xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
+ vfid ? "GGTT" : "spare GGTT", err);
+}
+
+static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
+ u64 value, u64 (*get)(struct xe_gt*, unsigned int),
+ const char *what, unsigned int last, int err)
+{
+ char size[10];
+
+ xe_gt_assert(gt, first);
+ xe_gt_assert(gt, num_vfs);
+ xe_gt_assert(gt, first <= last);
+
+ if (num_vfs == 1)
+ return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
+ first, first + num_vfs - 1, what);
+ if (last > first)
+ pf_config_bulk_set_u64_done(gt, first, last - first, value,
+ get, what, last, 0);
+ return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
+ }
+
+ /* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
+ value = get(gt, first);
+ string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
+ xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
+ first, first + num_vfs - 1, value, size, what);
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision
+ * @size: requested GGTT size
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs, u64 size)
+{
+ unsigned int n;
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ if (!num_vfs)
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = vfid; n < vfid + num_vfs; n++) {
+ err = pf_provision_vf_ggtt(gt, n, size);
+ if (err)
+ break;
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
+ xe_gt_sriov_pf_config_get_ggtt,
+ "GGTT", n, err);
+}
+
+/* Return: size of the largest continuous GGTT region */
+static u64 pf_get_max_ggtt(struct xe_gt *gt)
+{
+ struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
+ const struct drm_mm *mm = &ggtt->mm;
+ const struct drm_mm_node *entry;
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ u64 spare = pf_get_spare_ggtt(gt);
+ u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
+ u64 hole_start, hole_end, hole_size;
+ u64 max_hole = 0;
+
+ mutex_lock(&ggtt->lock);
+
+ drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
+ hole_start = max(hole_start, hole_min_start);
+ hole_start = ALIGN(hole_start, alignment);
+ hole_end = ALIGN_DOWN(hole_end, alignment);
+ if (hole_start >= hole_end)
+ continue;
+ hole_size = hole_end - hole_start;
+ xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
+ hole_start, hole_size / SZ_1K);
+ spare -= min3(spare, hole_size, max_hole);
+ max_hole = max(max_hole, hole_size);
+ }
+
+ mutex_unlock(&ggtt->lock);
+
+ xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
+ max_hole / SZ_1K, spare / SZ_1K);
+ return max_hole > spare ? max_hole - spare : 0;
+}
+
+static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
+{
+ u64 available = pf_get_max_ggtt(gt);
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ u64 fair;
+
+ /*
+ * To simplify the logic we only look at single largest GGTT region
+ * as that will be always the best fit for 1 VF case, and most likely
+ * will also nicely cover other cases where VFs are provisioned on the
+ * fresh and idle PF driver, without any stale GGTT allocations spread
+ * in the middle of the full GGTT range.
+ */
+
+ fair = div_u64(available, num_vfs);
+ fair = ALIGN_DOWN(fair, alignment);
+ xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
+ available / SZ_1K, num_vfs, fair / SZ_1K);
+ return fair;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ u64 fair;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ fair = pf_estimate_fair_ggtt(gt, num_vfs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (!fair)
+ return -ENOSPC;
+
+ return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
+}
+
+static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
+{
+ /* XXX: preliminary */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
+ hweight64(gt->info.engine_mask) : SZ_256;
+}
+
+static u32 pf_get_spare_ctxs(struct xe_gt *gt)
+{
+ u32 spare;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = gt->sriov.pf.spare.num_ctxs;
+ spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));
+
+ return spare;
+}
+
+static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (spare > GUC_ID_MAX)
+ return -EINVAL;
+
+ if (spare && spare < pf_get_min_spare_ctxs(gt))
+ return -EINVAL;
+
+ gt->sriov.pf.spare.num_ctxs = spare;
+
+ return 0;
+}
+
+/* Return: start ID or negative error code on failure */
+static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
+{
+ struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
+ unsigned int spare = pf_get_spare_ctxs(gt);
+
+ return xe_guc_id_mgr_reserve(idm, num, spare);
+}
+
+static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
+{
+ struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
+
+ if (num)
+ xe_guc_id_mgr_release(idm, start, num);
+}
+
+static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
+ config->begin_ctx = 0;
+ config->num_ctxs = 0;
+}
+
+static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int ret;
+
+ xe_gt_assert(gt, vfid);
+
+ if (num_ctxs > GUC_ID_MAX)
+ return -EINVAL;
+
+ if (config->num_ctxs) {
+ ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
+ if (unlikely(ret))
+ return ret;
+
+ pf_release_config_ctxs(gt, config);
+ }
+
+ if (!num_ctxs)
+ return 0;
+
+ ret = pf_reserve_ctxs(gt, num_ctxs);
+ if (unlikely(ret < 0))
+ return ret;
+
+ config->begin_ctx = ret;
+ config->num_ctxs = num_ctxs;
+
+ ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
+ if (unlikely(ret)) {
+ pf_release_config_ctxs(gt, config);
+ return ret;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
+ vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
+ return 0;
+}
+
+static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->num_ctxs;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ * If &vfid represents a PF then the number of PF's spare GuC context IDs is returned.
+ *
+ * Return: VF's quota (or PF's spare).
+ */
+u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 num_ctxs;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
+ else
+ num_ctxs = pf_get_spare_ctxs(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return num_ctxs;
+}
+
+static const char *no_unit(u32 unused)
+{
+ return "";
+}
+
+static const char *spare_unit(u32 unused)
+{
+ return " spare";
+}
+
+static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
+ const char *what, const char *(*unit)(u32), int err)
+{
+ char name[8];
+
+ xe_sriov_function_name(vfid, name, sizeof(name));
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
+ name, value, unit(value), what, ERR_PTR(err));
+ xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
+ name, actual, unit(actual), what);
+ return err;
+ }
+
+ /* the actual value may have changed during provisioning */
+ xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
+ name, actual, unit(actual), what);
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @num_ctxs: requested number of GuC context IDs (0 to release)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
+ else
+ err = pf_set_spare_ctxs(gt, num_ctxs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, num_ctxs,
+ xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
+ "GuC context IDs", vfid ? no_unit : spare_unit, err);
+}
+
+static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
+ u32 value, u32 (*get)(struct xe_gt*, unsigned int),
+ const char *what, const char *(*unit)(u32),
+ unsigned int last, int err)
+{
+ xe_gt_assert(gt, first);
+ xe_gt_assert(gt, num_vfs);
+ xe_gt_assert(gt, first <= last);
+
+ if (num_vfs == 1)
+ return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
+ first, first + num_vfs - 1, what);
+ if (last > first)
+ pf_config_bulk_set_u32_done(gt, first, last - first, value,
+ get, what, unit, last, 0);
+ return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
+ }
+
+ /* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
+ value = get(gt, first);
+ xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
+ first, first + num_vfs - 1, value, unit(value), what);
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier
+ * @num_vfs: number of VFs to provision
+ * @num_ctxs: requested number of GuC context IDs (0 to release)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs, u32 num_ctxs)
+{
+ unsigned int n;
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+
+ if (!num_vfs)
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = vfid; n < vfid + num_vfs; n++) {
+ err = pf_provision_vf_ctxs(gt, n, num_ctxs);
+ if (err)
+ break;
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
+ xe_gt_sriov_pf_config_get_ctxs,
+ "GuC context IDs", no_unit, n, err);
+}
+
+static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
+{
+ struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
+ u32 spare = pf_get_spare_ctxs(gt);
+ u32 fair = (idm->total - spare) / num_vfs;
+ int ret;
+
+ for (; fair; --fair) {
+ ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
+ if (ret < 0)
+ continue;
+ xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
+ break;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
+ return fair;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision (can't be 0)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ u32 fair;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ fair = pf_estimate_fair_ctxs(gt, num_vfs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (!fair)
+ return -ENOSPC;
+
+ return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
+}
+
+static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
+{
+ /* XXX: preliminary, we don't use doorbells yet! */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
+}
+
+static u32 pf_get_spare_dbs(struct xe_gt *gt)
+{
+ u32 spare;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = gt->sriov.pf.spare.num_dbs;
+ spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));
+
+ return spare;
+}
+
+static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (spare > GUC_NUM_DOORBELLS)
+ return -EINVAL;
+
+ if (spare && spare < pf_get_min_spare_dbs(gt))
+ return -EINVAL;
+
+ gt->sriov.pf.spare.num_dbs = spare;
+ return 0;
+}
+
+/* Return: start ID or negative error code on failure */
+static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
+{
+ struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
+ unsigned int spare = pf_get_spare_dbs(gt);
+
+ return xe_guc_db_mgr_reserve_range(dbm, num, spare);
+}
+
+static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
+{
+ struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
+
+ if (num)
+ xe_guc_db_mgr_release_range(dbm, start, num);
+}
+
+static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ pf_release_dbs(gt, config->begin_db, config->num_dbs);
+ config->begin_db = 0;
+ config->num_dbs = 0;
+}
+
+static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int ret;
+
+ xe_gt_assert(gt, vfid);
+
+ if (num_dbs > GUC_NUM_DOORBELLS)
+ return -EINVAL;
+
+ if (config->num_dbs) {
+ ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
+ if (unlikely(ret))
+ return ret;
+
+ pf_release_config_dbs(gt, config);
+ }
+
+ if (!num_dbs)
+ return 0;
+
+ ret = pf_reserve_dbs(gt, num_dbs);
+ if (unlikely(ret < 0))
+ return ret;
+
+ config->begin_db = ret;
+ config->num_dbs = num_dbs;
+
+ ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
+ if (unlikely(ret)) {
+ pf_release_config_dbs(gt, config);
+ return ret;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
+ vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
+ return 0;
+}
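+
+/*
+ * Note the reprovisioning order above: an already assigned doorbell range is
+ * first unconfigured in GuC (pushed as 0/0) and released, and only then is a
+ * new range reserved and pushed; a failed push rolls back the reservation.
+ */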
+
+static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->num_dbs;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ * If &vfid represents the PF, then the number of PF's spare GuC doorbell IDs is returned.
+ *
+ * Return: VF's quota (or PF's spare).
+ */
+u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 num_dbs;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ num_dbs = pf_get_vf_config_dbs(gt, vfid);
+ else
+ num_dbs = pf_get_spare_dbs(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return num_dbs;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @num_dbs: requested number of GuC doorbell IDs (0 to release)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
+{
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ err = pf_provision_vf_dbs(gt, vfid, num_dbs);
+ else
+ err = pf_set_spare_dbs(gt, num_dbs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, num_dbs,
+ xe_gt_sriov_pf_config_get_dbs(gt, vfid),
+ "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision
+ * @num_dbs: requested number of GuC doorbell IDs (0 to release)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs, u32 num_dbs)
+{
+ unsigned int n;
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+
+ if (!num_vfs)
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = vfid; n < vfid + num_vfs; n++) {
+ err = pf_provision_vf_dbs(gt, n, num_dbs);
+ if (err)
+ break;
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
+ xe_gt_sriov_pf_config_get_dbs,
+ "GuC doorbell IDs", no_unit, n, err);
+}
+
+static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
+{
+ struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
+ u32 spare = pf_get_spare_dbs(gt);
+ u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
+ int ret;
+
+ for (; fair; --fair) {
+ ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
+ if (ret < 0)
+ continue;
+ xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
+ break;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
+ return fair;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision (can't be 0)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ u32 fair;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ fair = pf_estimate_fair_dbs(gt, num_vfs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (!fair)
+ return -ENOSPC;
+
+ return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
+}
+
+static u64 pf_get_lmem_alignment(struct xe_gt *gt)
+{
+ /* this might be platform dependent */
+ return SZ_2M;
+}
+
+static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
+{
+ /* this might be platform dependent */
+ return SZ_128M; /* XXX: preliminary */
+}
+
+static u64 pf_get_spare_lmem(struct xe_gt *gt)
+{
+ u64 spare;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = gt->sriov.pf.spare.lmem_size;
+ spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));
+
+ return spare;
+}
+
+static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (size && size < pf_get_min_spare_lmem(gt))
+ return -EINVAL;
+
+ gt->sriov.pf.spare.lmem_size = size;
+ return 0;
+}
+
+static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_bo *bo;
+
+ bo = config->lmem_obj;
+ return bo ? bo->size : 0;
+}
+
+static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile;
+ unsigned int tid;
+ int err;
+
+ for_each_tile(tile, xe, tid) {
+ if (tile->primary_gt == gt) {
+ err = pf_push_vf_cfg_lmem(gt, vfid, size);
+ } else {
+ u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);
+
+ if (!lmem)
+ continue;
+ err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
+ }
+ if (unlikely(err))
+ return err;
+ }
+ return 0;
+}
+
+static void pf_force_lmtt_invalidate(struct xe_device *xe)
+{
+ /* TODO */
+}
+
+static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_lmtt *lmtt;
+ struct xe_tile *tile;
+ unsigned int tid;
+
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+ xe_lmtt_drop_pages(lmtt, vfid);
+ }
+}
+
+static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config;
+ struct xe_tile *tile;
+ struct xe_lmtt *lmtt;
+ struct xe_bo *bo;
+ struct xe_gt *gt;
+ u64 total, offset;
+ unsigned int gtid;
+ unsigned int tid;
+ int err;
+
+ total = 0;
+ for_each_tile(tile, xe, tid)
+ total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
+
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+
+ xe_lmtt_drop_pages(lmtt, vfid);
+ if (!total)
+ continue;
+
+ err = xe_lmtt_prepare_pages(lmtt, vfid, total);
+ if (err)
+ goto fail;
+
+ offset = 0;
+ for_each_gt(gt, xe, gtid) {
+ if (xe_gt_is_media_type(gt))
+ continue;
+
+ config = pf_pick_vf_config(gt, vfid);
+ bo = config->lmem_obj;
+ if (!bo)
+ continue;
+
+ err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
+ if (err)
+ goto fail;
+ offset += bo->size;
+ }
+ }
+
+ pf_force_lmtt_invalidate(xe);
+ return 0;
+
+fail:
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+ xe_lmtt_drop_pages(lmtt, vfid);
+ }
+ return err;
+}
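+
+/*
+ * For illustration (sizes are hypothetical): on a two-tile device where VF1
+ * was provisioned with 1G of LMEM on each tile, total = 2G, and each tile's
+ * LMTT is populated with the tile0 object at offset 0 and the tile1 object
+ * at offset 1G, so the VF sees one contiguous LMEM space.
+ */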
+
+static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (config->lmem_obj) {
+ xe_bo_unpin_map_no_vm(config->lmem_obj);
+ config->lmem_obj = NULL;
+ }
+}
+
+static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+ int err;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ size = round_up(size, pf_get_lmem_alignment(gt));
+
+ if (config->lmem_obj) {
+ err = pf_distribute_config_lmem(gt, vfid, 0);
+ if (unlikely(err))
+ return err;
+
+ pf_reset_vf_lmtt(xe, vfid);
+ pf_release_vf_config_lmem(gt, config);
+ }
+ xe_gt_assert(gt, !config->lmem_obj);
+
+ if (!size)
+ return 0;
+
+ xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ config->lmem_obj = bo;
+
+ err = pf_update_vf_lmtt(xe, vfid);
+ if (unlikely(err))
+ goto release;
+
+ err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
+ if (unlikely(err))
+ goto reset_lmtt;
+
+ xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
+ vfid, bo->size, bo->size / SZ_1M);
+ return 0;
+
+reset_lmtt:
+ pf_reset_vf_lmtt(xe, vfid);
+release:
+ pf_release_vf_config_lmem(gt, config);
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's spare) LMEM quota.
+ */
+u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
+{
+ u64 size;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ size = pf_get_vf_config_lmem(gt, vfid);
+ else
+ size = pf_get_spare_lmem(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return size;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: the VF identifier
+ * @size: requested LMEM size
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (vfid)
+ err = pf_provision_vf_lmem(gt, vfid, size);
+ else
+ err = pf_set_spare_lmem(gt, size);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u64_done(gt, vfid, size,
+ xe_gt_sriov_pf_config_get_lmem(gt, vfid),
+ vfid ? "LMEM" : "spare LMEM", err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision
+ * @size: requested LMEM size
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs, u64 size)
+{
+ unsigned int n;
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ if (!num_vfs)
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = vfid; n < vfid + num_vfs; n++) {
+ err = pf_provision_vf_lmem(gt, n, size);
+ if (err)
+ break;
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
+ xe_gt_sriov_pf_config_get_lmem,
+ "LMEM", n, err);
+}
+
+static u64 pf_query_free_lmem(struct xe_gt *gt)
+{
+ struct xe_tile *tile = gt->tile;
+
+ return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
+}
+
+static u64 pf_query_max_lmem(struct xe_gt *gt)
+{
+ u64 alignment = pf_get_lmem_alignment(gt);
+ u64 spare = pf_get_spare_lmem(gt);
+ u64 free = pf_query_free_lmem(gt);
+ u64 avail;
+
+ /* XXX: need to account for 2MB blocks only */
+ avail = free > spare ? free - spare : 0;
+ avail = round_down(avail, alignment);
+
+ return avail;
+}
+
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+#define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */
+#else
+#define MAX_FAIR_LMEM SZ_2G /* XXX: known issue with allocating BO over 2GiB */
+#endif
+
+static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
+{
+ u64 available = pf_query_max_lmem(gt);
+ u64 alignment = pf_get_lmem_alignment(gt);
+ u64 fair;
+
+ fair = div_u64(available, num_vfs);
+ fair = ALIGN_DOWN(fair, alignment);
+#ifdef MAX_FAIR_LMEM
+ fair = min_t(u64, MAX_FAIR_LMEM, fair);
+#endif
+ xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
+ available / SZ_1M, num_vfs, fair / SZ_1M);
+ return fair;
+}
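+
+/*
+ * For illustration (sizes are hypothetical): with 13G available and
+ * num_vfs = 4, fair = 13G / 4 = 3.25G, which is already 2M aligned, and is
+ * then clamped to MAX_FAIR_LMEM (2G on non-debug builds), so each VF would
+ * be provisioned with 2G.
+ */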
+
+/**
+ * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
+ * @gt: the &xe_gt (can't be media)
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision (can't be 0)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ u64 fair;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+
+ if (!IS_DGFX(gt_to_xe(gt)))
+ return 0;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ fair = pf_estimate_fair_lmem(gt, num_vfs);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (!fair)
+ return -ENOSPC;
+
+ return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
+ * @gt: the &xe_gt
+ * @vfid: starting VF identifier (can't be 0)
+ * @num_vfs: number of VFs to provision (can't be 0)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
+ unsigned int num_vfs)
+{
+ int result = 0;
+ int err;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, num_vfs);
+
+ if (!xe_gt_is_media_type(gt)) {
+ err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
+ result = result ?: err;
+ err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
+ result = result ?: err;
+ }
+ err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
+ result = result ?: err;
+ err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
+ result = result ?: err;
+
+ return result;
+}
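+
+/*
+ * Illustrative sketch (not a definitive flow): a PF enabling num_vfs VFs
+ * could fairly provision all of them on every GT with:
+ *
+ *	for_each_gt(gt, xe, gtid) {
+ *		err = xe_gt_sriov_pf_config_set_fair(gt, 1, num_vfs);
+ *		if (err)
+ *			break;
+ *	}
+ *
+ * Media GTs skip the GGTT/LMEM steps internally, so the same call can be
+ * made for every GT.
+ */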
+
+static const char *exec_quantum_unit(u32 exec_quantum)
+{
+ return exec_quantum ? "ms" : "(infinity)";
+}
+
+static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
+ u32 exec_quantum)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int err;
+
+ err = pf_push_vf_cfg_exec_quantum(gt, vfid, exec_quantum);
+ if (unlikely(err))
+ return err;
+
+ config->exec_quantum = exec_quantum;
+ return 0;
+}
+
+static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->exec_quantum;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
+ u32 exec_quantum)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, exec_quantum,
+ xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
+ "execution quantum", exec_quantum_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's) execution quantum in milliseconds.
+ */
+u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 exec_quantum;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ exec_quantum = pf_get_exec_quantum(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return exec_quantum;
+}
+
+static const char *preempt_timeout_unit(u32 preempt_timeout)
+{
+ return preempt_timeout ? "us" : "(infinity)";
+}
+
+static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int err;
+
+ err = pf_push_vf_cfg_preempt_timeout(gt, vfid, preempt_timeout);
+ if (unlikely(err))
+ return err;
+
+ config->preempt_timeout = preempt_timeout;
+
+ return 0;
+}
+
+static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->preempt_timeout;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, preempt_timeout,
+ xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
+ "preemption timeout", preempt_timeout_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's) preemption timeout in microseconds.
+ */
+u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 preempt_timeout;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ preempt_timeout = pf_get_preempt_timeout(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return preempt_timeout;
+}
+
+static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ config->exec_quantum = 0;
+ config->preempt_timeout = 0;
+}
+
+static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ if (!xe_gt_is_media_type(gt)) {
+ pf_release_vf_config_ggtt(gt, config);
+ pf_release_vf_config_lmem(gt, config);
+ }
+ pf_release_config_ctxs(gt, config);
+ pf_release_config_dbs(gt, config);
+ pf_reset_config_sched(gt, config);
+}
+
+/**
+ * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @force: release the configuration even if the GuC reset fails
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
+{
+ int err;
+
+ xe_gt_assert(gt, vfid);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err || force)
+ pf_release_vf_config(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
+ vfid, ERR_PTR(err),
+ force ? " but all resources were released anyway!" : "");
+ }
+
+ return force ? 0 : err;
+}
+
+/**
+ * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @refresh: if true, reset the configuration in GuC before pushing it again
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
+{
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (refresh)
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err)
+ err = pf_push_full_vf_config(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
+ refresh ? "refresh" : "push", vfid, ERR_PTR(err));
+ }
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print GGTT configuration data for all VFs.
+ * VFs without provisioned GGTT are ignored.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+ const struct xe_gt_sriov_config *config;
+ char buf[10];
+
+ for (n = 1; n <= total_vfs; n++) {
+ config = &gt->sriov.pf.vfs[n].config;
+ if (!drm_mm_node_allocated(&config->ggtt_region))
+ continue;
+
+ string_get_size(config->ggtt_region.size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
+ n, config->ggtt_region.start,
+ config->ggtt_region.start + config->ggtt_region.size - 1, buf);
+ }
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_print_ctxs - Print GuC context ID configurations.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print GuC context ID allocations across all VFs.
+ * VFs without GuC context IDs are skipped.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+ const struct xe_gt_sriov_config *config;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 1; n <= total_vfs; n++) {
+ config = &gt->sriov.pf.vfs[n].config;
+ if (!config->num_ctxs)
+ continue;
+
+ drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
+ n,
+ config->begin_ctx,
+ config->begin_ctx + config->num_ctxs - 1,
+ config->num_ctxs);
+ }
+
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print GuC doorbell ID allocations across all VFs.
+ * VFs without GuC doorbell IDs are skipped.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+ const struct xe_gt_sriov_config *config;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 1; n <= total_vfs; n++) {
+ config = &gt->sriov.pf.vfs[n].config;
+ if (!config->num_dbs)
+ continue;
+
+ drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
+ n,
+ config->begin_db,
+ config->begin_db + config->num_dbs - 1,
+ config->num_dbs);
+ }
+
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print GGTT ranges that are still available for provisioning.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
+ const struct drm_mm *mm = &ggtt->mm;
+ const struct drm_mm_node *entry;
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
+ u64 hole_start, hole_end, hole_size;
+ u64 spare, avail, total = 0;
+ char buf[10];
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+
+ spare = pf_get_spare_ggtt(gt);
+
+ mutex_lock(&ggtt->lock);
+
+ drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
+ hole_start = max(hole_start, hole_min_start);
+ hole_start = ALIGN(hole_start, alignment);
+ hole_end = ALIGN_DOWN(hole_end, alignment);
+ if (hole_start >= hole_end)
+ continue;
+ hole_size = hole_end - hole_start;
+ total += hole_size;
+
+ string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
+ hole_start, hole_end - 1, buf);
+ }
+
+ mutex_unlock(&ggtt->lock);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
+
+ string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
+
+ avail = total > spare ? total - spare : 0;
+
+ string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
new file mode 100644
index 000000000000..5e6b36f00b5b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONFIG_H_
+#define _XE_GT_SRIOV_PF_CONFIG_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_gt;
+
+u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size);
+int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt,
+ unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt,
+ unsigned int vfid, unsigned int num_vfs, u64 size);
+
+u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs);
+int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
+ u32 num_ctxs);
+
+u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs);
+int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
+ u32 num_dbs);
+
+u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size);
+int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
+ u64 size);
+
+u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum);
+
+u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout);
+
+int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force);
+int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh);
+
+int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p);
+int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p);
+int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p);
+
+int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
new file mode 100644
index 000000000000..d3745c355957
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
+#define _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
+
+#include <drm/drm_mm.h>
+
+struct xe_bo;
+
+/**
+ * struct xe_gt_sriov_config - GT level per-VF configuration data.
+ *
+ * Used by the PF driver to maintain per-VF provisioning data.
+ */
+struct xe_gt_sriov_config {
+ /** @ggtt_region: GGTT region assigned to the VF. */
+ struct drm_mm_node ggtt_region;
+ /** @lmem_obj: LMEM allocation for use by the VF. */
+ struct xe_bo *lmem_obj;
+ /** @num_ctxs: number of GuC context IDs. */
+ u16 num_ctxs;
+ /** @begin_ctx: start index of GuC context ID range. */
+ u16 begin_ctx;
+ /** @num_dbs: number of GuC doorbell IDs. */
+ u16 num_dbs;
+ /** @begin_db: start index of GuC doorbell ID range. */
+ u16 begin_db;
+ /** @exec_quantum: execution quantum in milliseconds. */
+ u32 exec_quantum;
+ /** @preempt_timeout: preemption timeout in microseconds. */
+ u32 preempt_timeout;
+};
+
+/**
+ * struct xe_gt_sriov_spare_config - GT-level PF spare configuration data.
+ *
+ * Used by the PF driver to maintain its own reserved (spare) provisioning
+ * data that is not tracked in struct xe_gt_sriov_config.
+ */
+struct xe_gt_sriov_spare_config {
+ /** @ggtt_size: GGTT size. */
+ u64 ggtt_size;
+ /** @lmem_size: LMEM size. */
+ u64 lmem_size;
+ /** @num_ctxs: number of GuC submission contexts. */
+ u16 num_ctxs;
+ /** @num_dbs: number of GuC doorbells. */
+ u16 num_dbs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
new file mode 100644
index 000000000000..40b8f881fe04
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include "abi/guc_actions_sriov_abi.h"
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_ct.h"
+#include "xe_sriov.h"
+
+static const char *control_cmd_to_string(u32 cmd)
+{
+ switch (cmd) {
+ case GUC_PF_TRIGGER_VF_PAUSE:
+ return "PAUSE";
+ case GUC_PF_TRIGGER_VF_RESUME:
+ return "RESUME";
+ case GUC_PF_TRIGGER_VF_STOP:
+ return "STOP";
+ case GUC_PF_TRIGGER_VF_FLR_START:
+ return "FLR_START";
+ case GUC_PF_TRIGGER_VF_FLR_FINISH:
+ return "FLR_FINISH";
+ default:
+ return "<unknown>";
+ }
+}
+
+static int guc_action_vf_control_cmd(struct xe_guc *guc, u32 vfid, u32 cmd)
+{
+ u32 request[PF2GUC_VF_CONTROL_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_VF_CONTROL),
+ FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID, vfid),
+ FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND, cmd),
+ };
+ int ret;
+
+ /* XXX those two commands are now sent from the G2H handler */
+ if (cmd == GUC_PF_TRIGGER_VF_FLR_START || cmd == GUC_PF_TRIGGER_VF_FLR_FINISH)
+ return xe_guc_ct_send_g2h_handler(&guc->ct, request, ARRAY_SIZE(request));
+
+ ret = xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
+{
+ int err;
+
+ xe_gt_assert(gt, vfid != PFID);
+
+ err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
+ if (unlikely(err))
+ xe_gt_sriov_err(gt, "VF%u control command %s failed (%pe)\n",
+ vfid, control_cmd_to_string(cmd), ERR_PTR(err));
+ return err;
+}
+
+static int pf_send_vf_pause(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_PAUSE);
+}
+
+static int pf_send_vf_resume(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_RESUME);
+}
+
+static int pf_send_vf_stop(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_STOP);
+}
+
+static int pf_send_vf_flr_start(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_START);
+}
+
+static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_FINISH);
+}
+
+/**
+ * xe_gt_sriov_pf_control_pause_vf - Pause a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_pause(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_control_resume_vf - Resume a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_resume(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_control_stop_vf - Stop a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_send_vf_stop(gt, vfid);
+}
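+
+/*
+ * Illustrative sketch (not a definitive flow): a PF interface could
+ * temporarily quiesce a VF and later let it run again with:
+ *
+ *	err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);
+ *	if (!err) {
+ *		... inspect or save VF state ...
+ *		err = xe_gt_sriov_pf_control_resume_vf(gt, vfid);
+ *	}
+ */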
+
+/**
+ * DOC: The VF FLR Flow with GuC
+ *
+ * PF GUC PCI
+ * ========================================================
+ * | | |
+ * (1) | [ ] <----- FLR --|
+ * | [ ] :
+ * (2) [ ] <-------- NOTIFY FLR --[ ]
+ * [ ] |
+ * (3) [ ] |
+ * [ ] |
+ * [ ]-- START FLR ---------> [ ]
+ * | [ ]
+ * (4) | [ ]
+ * | [ ]
+ * [ ] <--------- FLR DONE -- [ ]
+ * [ ] |
+ * (5) [ ] |
+ * [ ] |
+ * [ ]-- FINISH FLR --------> [ ]
+ * | |
+ *
+ * Step 1: PCI HW generates interrupt to the GuC about VF FLR
+ * Step 2: GuC FW sends G2H notification to the PF about VF FLR
+ * Step 2a: on some platforms G2H is only received from root GuC
+ * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
+ * Step 3a: on some platforms PF must send H2G to all other GuCs
+ * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
+ * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
+ */
+
+static bool needs_dispatch_flr(struct xe_device *xe)
+{
+ return xe->info.platform == XE_PVC;
+}
+
+static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_gt *gtit;
+ unsigned int gtid;
+
+ xe_gt_sriov_info(gt, "VF%u FLR\n", vfid);
+
+ if (needs_dispatch_flr(xe)) {
+ for_each_gt(gtit, xe, gtid)
+ pf_send_vf_flr_start(gtit, vfid);
+ } else {
+ pf_send_vf_flr_start(gt, vfid);
+ }
+}
+
+static void pf_handle_vf_flr_done(struct xe_gt *gt, u32 vfid)
+{
+ pf_send_vf_flr_finish(gt, vfid);
+}
+
+static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
+{
+ switch (eventid) {
+ case GUC_PF_NOTIFY_VF_FLR:
+ pf_handle_vf_flr(gt, vfid);
+ break;
+ case GUC_PF_NOTIFY_VF_FLR_DONE:
+ pf_handle_vf_flr_done(gt, vfid);
+ break;
+ case GUC_PF_NOTIFY_VF_PAUSE_DONE:
+ break;
+ case GUC_PF_NOTIFY_VF_FIXUP_DONE:
+ break;
+ default:
+ return -ENOPKG;
+ }
+ return 0;
+}
+
+static int pf_handle_pf_event(struct xe_gt *gt, u32 eventid)
+{
+ switch (eventid) {
+ case GUC_PF_NOTIFY_VF_ENABLE:
+ xe_gt_sriov_dbg_verbose(gt, "VFs %s/%s\n",
+ str_enabled_disabled(true),
+ str_enabled_disabled(false));
+ break;
+ default:
+ return -ENOPKG;
+ }
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_process_guc2pf - Handle VF state notification from GuC.
+ * @gt: the &xe_gt
+ * @msg: the G2H message
+ * @len: the length of the G2H message
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len)
+{
+ u32 vfid;
+ u32 eventid;
+
+ xe_gt_assert(gt, len);
+ xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
+ xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
+ xe_gt_assert(gt, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
+ GUC_ACTION_GUC2PF_VF_STATE_NOTIFY);
+
+ if (unlikely(!xe_device_is_sriov_pf(gt_to_xe(gt))))
+ return -EPROTO;
+
+ if (unlikely(FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_0_MBZ, msg[0])))
+ return -EPFNOSUPPORT;
+
+ if (unlikely(len != GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN))
+ return -EPROTO;
+
+ vfid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID, msg[1]);
+ eventid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT, msg[2]);
+
+ return vfid ? pf_handle_vf_event(gt, vfid, eventid) : pf_handle_pf_event(gt, eventid);
+}
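+
+/*
+ * For illustration: per the checks above, the notification is a fixed-size
+ * G2H event where msg[0] carries the HXG origin/type/action fields, msg[1]
+ * the VFID (0 routes to the PF event handler) and msg[2] the event id,
+ * e.g. GUC_PF_NOTIFY_VF_FLR.
+ */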
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
new file mode 100644
index 000000000000..850a3e37661f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONTROL_H_
+#define _XE_GT_SRIOV_PF_CONTROL_H_
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct xe_gt;
+
+int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
+
+#ifdef CONFIG_PCI_IOV
+int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len);
+#else
+static inline int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len)
+{
+ return -EPROTO;
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
new file mode 100644
index 000000000000..0bf12d89ceb2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_HELPERS_H_
+#define _XE_GT_SRIOV_PF_HELPERS_H_
+
+#include "xe_gt_types.h"
+#include "xe_sriov_pf_helpers.h"
+
+/**
+ * xe_gt_sriov_pf_assert_vfid() - warn if &vfid is not a supported VF number when debugging.
+ * @gt: the PF &xe_gt to assert on
+ * @vfid: the VF number to assert
+ *
+ * Assert that &gt belongs to the Physical Function (PF) device and that the
+ * provided &vfid is within the range of supported VF numbers (up to the
+ * maximum number of VFs that the driver can support, including VF0 that
+ * represents the PF itself).
+ *
+ * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ */
+#define xe_gt_sriov_pf_assert_vfid(gt, vfid) xe_sriov_pf_assert_vfid(gt_to_xe(gt), (vfid))
+
+static inline int xe_gt_sriov_pf_get_totalvfs(struct xe_gt *gt)
+{
+ return xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+}
+
+static inline struct mutex *xe_gt_sriov_pf_master_mutex(struct xe_gt *gt)
+{
+ return xe_sriov_pf_master_mutex(gt_to_xe(gt));
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
new file mode 100644
index 000000000000..fae5be5a2a11
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include "abi/guc_actions_sriov_abi.h"
+
+#include "xe_bo.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_policy.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_klv_helpers.h"
+#include "xe_pm.h"
+
+/*
+ * Return: number of KLVs that were successfully parsed and saved,
+ * negative error code on failure.
+ */
+static int guc_action_update_vgt_policy(struct xe_guc *guc, u64 addr, u32 size)
+{
+ u32 request[] = {
+ GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ size,
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
+}
+
+/*
+ * Return: number of KLVs that were successfully parsed and saved,
+ * negative error code on failure.
+ */
+static int pf_send_policy_klvs(struct xe_gt *gt, const u32 *klvs, u32 num_dwords)
+{
+ const u32 bytes = num_dwords * sizeof(u32);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_bo *bo;
+ int ret;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(bytes, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
+
+ ret = guc_action_update_vgt_policy(guc, xe_bo_ggtt_addr(bo), num_dwords);
+
+ xe_bo_unpin_map_no_vm(bo);
+
+ return ret;
+}
+
+/*
+ * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
+ * negative error code on failure.
+ */
+static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
+ const u32 *klvs, u32 num_dwords)
+{
+ int ret;
+
+ xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+ ret = pf_send_policy_klvs(gt, klvs, num_dwords);
+
+ if (ret != num_klvs) {
+ int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
+ struct drm_printer p = xe_gt_info_printer(gt);
+
+ xe_gt_sriov_notice(gt, "Failed to push %u policy KLV%s (%pe)\n",
+ num_klvs, str_plural(num_klvs), ERR_PTR(err));
+ xe_guc_klv_print(klvs, num_dwords, &p);
+ return err;
+ }
+
+ return 0;
+}
+
+static int pf_push_policy_u32(struct xe_gt *gt, u16 key, u32 value)
+{
+ u32 klv[] = {
+ PREP_GUC_KLV(key, 1),
+ value,
+ };
+
+ return pf_push_policy_klvs(gt, 1, klv, ARRAY_SIZE(klv));
+}
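+
+/*
+ * For illustration: the 32-bit policy KLV built above occupies two dwords,
+ * e.g. for GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY with value 100:
+ *
+ *	dword 0: key in bits 31:16, length in dwords (here 1) in bits 15:0
+ *	dword 1: 0x64 (the value)
+ */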
+
+static int pf_update_policy_bool(struct xe_gt *gt, u16 key, bool *policy, bool value)
+{
+ int err;
+
+ err = pf_push_policy_u32(gt, key, value);
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to update policy %#x '%s' to '%s' (%pe)\n",
+ key, xe_guc_klv_key_to_string(key),
+ str_enabled_disabled(value), ERR_PTR(err));
+ return err;
+ }
+
+ xe_gt_sriov_dbg(gt, "policy key %#x '%s' updated to '%s'\n",
+ key, xe_guc_klv_key_to_string(key),
+ str_enabled_disabled(value));
+
+ *policy = value;
+ return 0;
+}
+
+static int pf_update_policy_u32(struct xe_gt *gt, u16 key, u32 *policy, u32 value)
+{
+ int err;
+
+ err = pf_push_policy_u32(gt, key, value);
+ if (unlikely(err)) {
+ xe_gt_sriov_notice(gt, "Failed to update policy %#x '%s' to %u (%pe)\n",
+ key, xe_guc_klv_key_to_string(key),
+ value, ERR_PTR(err));
+ return err;
+ }
+
+ xe_gt_sriov_dbg(gt, "policy key %#x '%s' updated to %u\n",
+ key, xe_guc_klv_key_to_string(key), value);
+
+ *policy = value;
+ return 0;
+}
+
+static int pf_provision_sched_if_idle(struct xe_gt *gt, bool enable)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
+ &gt->sriov.pf.policy.guc.sched_if_idle,
+ enable);
+}
+
+static int pf_reprovision_sched_if_idle(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_provision_sched_if_idle(gt, gt->sriov.pf.policy.guc.sched_if_idle);
+}
+
+static void pf_sanitize_sched_if_idle(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ gt->sriov.pf.policy.guc.sched_if_idle = false;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_set_sched_if_idle - Control the 'sched_if_idle' policy.
+ * @gt: the &xe_gt where to apply the policy
+ * @enable: the value of the 'sched_if_idle' policy
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_set_sched_if_idle(struct xe_gt *gt, bool enable)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_sched_if_idle(gt, enable);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_get_sched_if_idle - Retrieve value of 'sched_if_idle' policy.
+ * @gt: the &xe_gt where to read the policy from
+ *
+ * This function can only be called on PF.
+ *
+ * Return: value of 'sched_if_idle' policy.
+ */
+bool xe_gt_sriov_pf_policy_get_sched_if_idle(struct xe_gt *gt)
+{
+ bool enable;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ enable = gt->sriov.pf.policy.guc.sched_if_idle;
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return enable;
+}
+
+static int pf_provision_reset_engine(struct xe_gt *gt, bool enable)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY,
+ &gt->sriov.pf.policy.guc.reset_engine, enable);
+}
+
+static int pf_reprovision_reset_engine(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_provision_reset_engine(gt, gt->sriov.pf.policy.guc.reset_engine);
+}
+
+static void pf_sanitize_reset_engine(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ gt->sriov.pf.policy.guc.reset_engine = false;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_set_reset_engine - Control the 'reset_engine' policy.
+ * @gt: the &xe_gt where to apply the policy
+ * @enable: the value of the 'reset_engine' policy
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_set_reset_engine(struct xe_gt *gt, bool enable)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_reset_engine(gt, enable);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_get_reset_engine - Retrieve value of 'reset_engine' policy.
+ * @gt: the &xe_gt where to read the policy from
+ *
+ * This function can only be called on PF.
+ *
+ * Return: value of 'reset_engine' policy.
+ */
+bool xe_gt_sriov_pf_policy_get_reset_engine(struct xe_gt *gt)
+{
+ bool enable;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ enable = gt->sriov.pf.policy.guc.reset_engine;
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return enable;
+}
+
+static int pf_provision_sample_period(struct xe_gt *gt, u32 value)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_update_policy_u32(gt, GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY,
+ &gt->sriov.pf.policy.guc.sample_period, value);
+}
+
+static int pf_reprovision_sample_period(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_provision_sample_period(gt, gt->sriov.pf.policy.guc.sample_period);
+}
+
+static void pf_sanitize_sample_period(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ gt->sriov.pf.policy.guc.sample_period = 0;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_set_sample_period - Control the 'sample_period' policy.
+ * @gt: the &xe_gt where to apply the policy
+ * @value: the value of the 'sample_period' policy
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_set_sample_period(struct xe_gt *gt, u32 value)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_sample_period(gt, value);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_policy_get_sample_period - Retrieve value of 'sample_period' policy.
+ * @gt: the &xe_gt where to read the policy from
+ *
+ * This function can only be called on PF.
+ *
+ * Return: value of 'sample_period' policy.
+ */
+u32 xe_gt_sriov_pf_policy_get_sample_period(struct xe_gt *gt)
+{
+ u32 value;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ value = gt->sriov.pf.policy.guc.sample_period;
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return value;
+}
+
+static void pf_sanitize_guc_policies(struct xe_gt *gt)
+{
+ pf_sanitize_sched_if_idle(gt);
+ pf_sanitize_reset_engine(gt);
+ pf_sanitize_sample_period(gt);
+}
+
+/**
+ * xe_gt_sriov_pf_policy_sanitize - Reset policy settings.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ */
+void xe_gt_sriov_pf_policy_sanitize(struct xe_gt *gt)
+{
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_sanitize_guc_policies(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+}
+
+/**
+ * xe_gt_sriov_pf_policy_reprovision - Reprovision (and optionally reset) policy settings.
+ * @gt: the &xe_gt
+ * @reset: if true, reprovision using default values instead of the latest ones
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset)
+{
+ int err = 0;
+
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ if (reset)
+ pf_sanitize_guc_policies(gt);
+ err |= pf_reprovision_sched_if_idle(gt);
+ err |= pf_reprovision_reset_engine(gt);
+ err |= pf_reprovision_sample_period(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return err ? -ENXIO : 0;
+}
+
+static void print_guc_policies(struct drm_printer *p, struct xe_gt_sriov_guc_policies *policy)
+{
+ drm_printf(p, "%s:\t%s\n",
+ xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY),
+ str_enabled_disabled(policy->sched_if_idle));
+ drm_printf(p, "%s:\t%s\n",
+ xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY),
+ str_enabled_disabled(policy->reset_engine));
+ drm_printf(p, "%s:\t%u %s\n",
+ xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY),
+ policy->sample_period, policy->sample_period ? "ms" : "(disabled)");
+}
+
+/**
+ * xe_gt_sriov_pf_policy_print - Dump actual policy values.
+ * @gt: the &xe_gt where to read the policy from
+ * @p: the &drm_printer
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_policy_print(struct xe_gt *gt, struct drm_printer *p)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ print_guc_policies(p, &gt->sriov.pf.policy.guc);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h
new file mode 100644
index 000000000000..2a5dc33dc6d7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_POLICY_H_
+#define _XE_GT_SRIOV_PF_POLICY_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_gt;
+
+int xe_gt_sriov_pf_policy_set_sched_if_idle(struct xe_gt *gt, bool enable);
+bool xe_gt_sriov_pf_policy_get_sched_if_idle(struct xe_gt *gt);
+int xe_gt_sriov_pf_policy_set_reset_engine(struct xe_gt *gt, bool enable);
+bool xe_gt_sriov_pf_policy_get_reset_engine(struct xe_gt *gt);
+int xe_gt_sriov_pf_policy_set_sample_period(struct xe_gt *gt, u32 value);
+u32 xe_gt_sriov_pf_policy_get_sample_period(struct xe_gt *gt);
+
+void xe_gt_sriov_pf_policy_sanitize(struct xe_gt *gt);
+int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset);
+int xe_gt_sriov_pf_policy_print(struct xe_gt *gt, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h
new file mode 100644
index 000000000000..4de532af135e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_POLICY_TYPES_H_
+#define _XE_GT_SRIOV_PF_POLICY_TYPES_H_
+
+#include <linux/types.h>
+
+/**
+ * struct xe_gt_sriov_guc_policies - GuC SR-IOV policies.
+ * @sched_if_idle: controls strict scheduling policy.
+ * @reset_engine: controls engines reset on VF switch policy.
+ * @sample_period: adverse events sampling period (in milliseconds).
+ */
+struct xe_gt_sriov_guc_policies {
+ bool sched_if_idle;
+ bool reset_engine;
+ u32 sample_period;
+};
+
+/**
+ * struct xe_gt_sriov_pf_policy - PF policy data.
+ * @guc: GuC scheduling policies.
+ */
+struct xe_gt_sriov_pf_policy {
+ struct xe_gt_sriov_guc_policies guc;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
new file mode 100644
index 000000000000..faf9ee8266ce
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_TYPES_H_
+#define _XE_GT_SRIOV_PF_TYPES_H_
+
+#include <linux/types.h>
+
+#include "xe_gt_sriov_pf_config_types.h"
+#include "xe_gt_sriov_pf_policy_types.h"
+
+/**
+ * struct xe_gt_sriov_metadata - GT level per-VF metadata.
+ */
+struct xe_gt_sriov_metadata {
+ /** @config: per-VF provisioning data. */
+ struct xe_gt_sriov_config config;
+};
+
+/**
+ * struct xe_gt_sriov_pf - GT level PF virtualization data.
+ * @policy: policy data.
+ * @spare: PF-only provisioning configuration.
+ * @vfs: metadata for all VFs.
+ */
+struct xe_gt_sriov_pf {
+ struct xe_gt_sriov_pf_policy policy;
+ struct xe_gt_sriov_spare_config spare;
+ struct xe_gt_sriov_metadata *vfs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs.c b/drivers/gpu/drm/xe/xe_gt_sysfs.c
index c69d2e8a0fe1..1e5971072bc8 100644
--- a/drivers/gpu/drm/xe/xe_gt_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sysfs.c
@@ -29,7 +29,7 @@ static void gt_sysfs_fini(struct drm_device *drm, void *arg)
kobject_put(gt->sysfs);
}
-void xe_gt_sysfs_init(struct xe_gt *gt)
+int xe_gt_sysfs_init(struct xe_gt *gt)
{
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
@@ -38,24 +38,18 @@ void xe_gt_sysfs_init(struct xe_gt *gt)
kg = kzalloc(sizeof(*kg), GFP_KERNEL);
if (!kg)
- return;
+ return -ENOMEM;
kobject_init(&kg->base, &xe_gt_sysfs_kobj_type);
kg->gt = gt;
err = kobject_add(&kg->base, tile->sysfs, "gt%d", gt->info.id);
if (err) {
- drm_warn(&xe->drm, "failed to add GT sysfs directory, err: %d\n", err);
kobject_put(&kg->base);
- return;
+ return err;
}
gt->sysfs = &kg->base;
- err = drmm_add_action_or_reset(&xe->drm, gt_sysfs_fini, gt);
- if (err) {
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- return;
- }
+ return drmm_add_action_or_reset(&xe->drm, gt_sysfs_fini, gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs.h b/drivers/gpu/drm/xe/xe_gt_sysfs.h
index e3ec278ca0be..ecbfcc5c7d42 100644
--- a/drivers/gpu/drm/xe/xe_gt_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_sysfs.h
@@ -8,7 +8,7 @@
#include "xe_gt_sysfs_types.h"
-void xe_gt_sysfs_init(struct xe_gt *gt);
+int xe_gt_sysfs_init(struct xe_gt *gt);
static inline struct xe_gt *
kobj_to_gt(struct kobject *kobj)
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
index 63d640591a52..fbe21a8599ca 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
@@ -11,6 +11,7 @@
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle_sysfs.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
/**
* DOC: Xe GT Throttle
@@ -38,10 +39,12 @@ static u32 read_perf_limit_reasons(struct xe_gt *gt)
{
u32 reg;
+ xe_pm_runtime_get(gt_to_xe(gt));
if (xe_gt_is_media_type(gt))
reg = xe_mmio_read32(gt, MTL_MEDIA_PERF_LIMIT_REASONS);
else
reg = xe_mmio_read32(gt, GT0_PERF_LIMIT_REASONS);
+ xe_pm_runtime_put(gt_to_xe(gt));
return reg;
}
@@ -233,19 +236,14 @@ static void gt_throttle_sysfs_fini(struct drm_device *drm, void *arg)
sysfs_remove_group(gt->freq, &throttle_group_attrs);
}
-void xe_gt_throttle_sysfs_init(struct xe_gt *gt)
+int xe_gt_throttle_sysfs_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
err = sysfs_create_group(gt->freq, &throttle_group_attrs);
- if (err) {
- drm_warn(&xe->drm, "failed to register throttle sysfs, err: %d\n", err);
- return;
- }
-
- err = drmm_add_action_or_reset(&xe->drm, gt_throttle_sysfs_fini, gt);
if (err)
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return err;
+
+ return drmm_add_action_or_reset(&xe->drm, gt_throttle_sysfs_fini, gt);
}
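
read_perf_limit_reasons() above now brackets its register read with a
runtime-PM reference. A toy, self-contained model of why the bracket matters
(the pm_get()/pm_put() names are stand-ins, not the xe_pm API):

	#include <stdio.h>

	static int pm_refcount;  /* toy runtime-PM reference count */

	static void pm_get(void) { pm_refcount++; }
	static void pm_put(void) { pm_refcount--; }

	static unsigned int mmio_read(void)
	{
		if (!pm_refcount)        /* device may be powered down */
			return 0;
		return 0xC0FFEE;         /* pretend register value */
	}

	int main(void)
	{
		unsigned int reg;

		pm_get();                /* wake the device before the read */
		reg = mmio_read();
		pm_put();                /* let it suspend again afterwards */
		printf("reg = %#x\n", reg);
		return 0;
	}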
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h
index 3ecfd4beffe1..6c61e6f228a8 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h
@@ -10,7 +10,7 @@
struct xe_gt;
-void xe_gt_throttle_sysfs_init(struct xe_gt *gt);
+int xe_gt_throttle_sysfs_init(struct xe_gt *gt);
#endif /* _XE_GT_THROTTLE_SYSFS_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index f03e077f81a0..93df2d7969b3 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -11,7 +11,9 @@
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
+#include "xe_mmio.h"
#include "xe_trace.h"
+#include "regs/xe_guc_regs.h"
#define TLB_TIMEOUT (HZ / 4)
@@ -61,7 +63,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
spin_lock_init(&gt->tlb_invalidation.pending_lock);
spin_lock_init(&gt->tlb_invalidation.lock);
- gt->tlb_invalidation.fence_context = dma_fence_context_alloc(1);
INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
xe_gt_tlb_fence_timeout);
@@ -210,7 +211,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
* Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
* negative error code on error.
*/
-int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
{
u32 action[] = {
XE_GUC_ACTION_TLB_INVALIDATION,
@@ -223,6 +224,45 @@ int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
}
/**
+ * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
+ * @gt: graphics tile
+ *
+ * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
+ * synchronous.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
+ gt->uc.guc.submission_state.enabled) {
+ int seqno;
+
+ seqno = xe_gt_tlb_invalidation_guc(gt);
+ if (seqno <= 0)
+ return seqno;
+
+ xe_gt_tlb_invalidation_wait(gt, seqno);
+ } else if (xe_device_uc_enabled(xe)) {
+ xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
+ xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
+ PVC_GUC_TLB_INV_DESC1_INVALIDATE);
+ xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
+ PVC_GUC_TLB_INV_DESC0_VALID);
+ } else {
+ xe_mmio_write32(gt, GUC_TLB_INV_CR,
+ GUC_TLB_INV_CR_INVALIDATE);
+ }
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
+
+ return 0;
+}
+
+/**
* xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
* @gt: graphics tile
 * @fence: invalidation fence which will be signaled on TLB invalidation
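
xe_gt_tlb_invalidation_ggtt() gives GGTT updaters a single synchronous entry
point that hides the GuC-vs-MMIO choice. A hedged sketch of the call-site
pattern (stubbed; the real callers live in the GGTT code):

	#include <stdio.h>

	/* Stub standing in for xe_gt_tlb_invalidation_ggtt(); 0 or -errno. */
	static int tlb_inval_ggtt_stub(void) { return 0; }

	static int ggtt_update_sketch(void)
	{
		/* ... write the new GGTT PTEs here ... */

		/* then drop any stale translations; the call is synchronous
		 * and internally picks the GuC or MMIO path */
		return tlb_inval_ggtt_stub();
	}

	int main(void)
	{
		printf("ggtt_update_sketch() = %d\n", ggtt_update_sketch());
		return 0;
	}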
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index b333c1709397..fbb743d80d2c 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -16,7 +16,7 @@ struct xe_vma;
int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_guc(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
struct xe_vma *vma);
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 5dc62fe1be49..3733e7a6860d 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -8,12 +8,10 @@
#include <linux/bitmap.h>
#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_mmio.h"
-#define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
-#define XE_MAX_EU_FUSE_BITS (32 * XE_MAX_EU_FUSE_REGS)
-
static void
load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
{
@@ -62,6 +60,114 @@ load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
bitmap_from_arr32(mask, &val, XE_MAX_EU_FUSE_BITS);
}
+/**
+ * gen_l3_mask_from_pattern - Replicate a bit pattern according to a mask
+ * @xe: device
+ * @dst: destination
+ * @pattern: pattern to replicate
+ * @patternbits: size of the pattern, in bits
+ * @mask: mask describing where to replicate the pattern
+ *
+ * Used to compute the L3 bank masks in a generic format on various
+ * platforms where the internal representation of L3 nodes and the
+ * register masks differ.
+ *
+ * Example 1:
+ * ----------
+ * @pattern = 0b1111
+ * └┬─┘
+ * @patternbits = 4 (bits)
+ * @mask = 0b0101
+ * ││││
+ * │││└────────────────── 0b1111 (=1×0b1111)
+ * ││└──────────── 0b0000 │ (=0×0b1111)
+ * │└────── 0b1111 │ │ (=1×0b1111)
+ * └ 0b0000 │ │ │ (=0×0b1111)
+ * │ │ │ │
+ * @dst = 0b0000 0b1111 0b0000 0b1111
+ *
+ * Example 2:
+ * ----------
+ * @pattern = 0b11111111
+ * └┬─────┘
+ * @patternbits = 8 (bits)
+ * @mask = 0b10
+ * ││
+ * ││
+ * ││
+ * │└────────── 0b00000000 (=0×0b11111111)
+ * └ 0b11111111 │ (=1×0b11111111)
+ * │ │
+ * @dst = 0b11111111 0b00000000
+ */
+static void
+gen_l3_mask_from_pattern(struct xe_device *xe, xe_l3_bank_mask_t dst,
+ xe_l3_bank_mask_t pattern, int patternbits,
+ unsigned long mask)
+{
+ unsigned long bit;
+
+ xe_assert(xe, fls(mask) <= patternbits);
+ for_each_set_bit(bit, &mask, 32) {
+ xe_l3_bank_mask_t shifted_pattern = {};
+
+ bitmap_shift_left(shifted_pattern, pattern, bit * patternbits,
+ XE_MAX_L3_BANK_MASK_BITS);
+ bitmap_or(dst, dst, shifted_pattern, XE_MAX_L3_BANK_MASK_BITS);
+ }
+}
+
+static void
+load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 fuse3 = xe_mmio_read32(gt, MIRROR_FUSE3);
+
+ if (GRAPHICS_VER(xe) >= 20) {
+ xe_l3_bank_mask_t per_node = {};
+ u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
+ u32 bank_val = REG_FIELD_GET(XE2_GT_L3_MODE_MASK, fuse3);
+
+ bitmap_from_arr32(per_node, &bank_val, 32);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 4,
+ meml3_en);
+ } else if (GRAPHICS_VERx100(xe) >= 1270) {
+ xe_l3_bank_mask_t per_node = {};
+ xe_l3_bank_mask_t per_mask_bit = {};
+ u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
+ u32 fuse4 = xe_mmio_read32(gt, XEHP_FUSE4);
+ u32 bank_val = REG_FIELD_GET(GT_L3_EXC_MASK, fuse4);
+
+ bitmap_set_value8(per_mask_bit, 0x3, 0);
+ gen_l3_mask_from_pattern(xe, per_node, per_mask_bit, 2, bank_val);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 4,
+ meml3_en);
+ } else if (xe->info.platform == XE_PVC) {
+ xe_l3_bank_mask_t per_node = {};
+ xe_l3_bank_mask_t per_mask_bit = {};
+ u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
+ u32 bank_val = REG_FIELD_GET(XEHPC_GT_L3_MODE_MASK, fuse3);
+
+ bitmap_set_value8(per_mask_bit, 0xf, 0);
+ gen_l3_mask_from_pattern(xe, per_node, per_mask_bit, 4,
+ bank_val);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 16,
+ meml3_en);
+ } else if (xe->info.platform == XE_DG2) {
+ xe_l3_bank_mask_t per_node = {};
+ u32 mask = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
+
+ bitmap_set_value8(per_node, 0xff, 0);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 8, mask);
+ } else {
+ /* 1:1 register bit to mask bit (inverted register bits) */
+ u32 mask = REG_FIELD_GET(XELP_GT_L3_MODE_MASK, ~fuse3);
+
+ bitmap_from_arr32(l3_bank_mask, &mask, 32);
+ }
+}
+
static void
get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
{
@@ -106,6 +212,7 @@ xe_gt_topology_init(struct xe_gt *gt)
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
XE2_GT_COMPUTE_DSS_2);
load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
+ load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
@@ -123,6 +230,8 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "EU mask per DSS: %*pb\n", XE_MAX_EU_FUSE_BITS,
gt->fuse_topo.eu_mask_per_dss);
+ drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
+ gt->fuse_topo.l3_bank_mask);
}
/*
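
The replication that gen_l3_mask_from_pattern() performs with bitmaps can be
checked with plain 64-bit arithmetic; this standalone program reproduces
Example 1 from the kernel-doc above:

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t replicate(uint64_t pattern, int patternbits, uint64_t mask)
	{
		uint64_t dst = 0;

		/* for each set bit in mask, OR in a shifted copy of pattern */
		for (int bit = 0; bit < 32; bit++)
			if (mask & (1ull << bit))
				dst |= pattern << (bit * patternbits);
		return dst;
	}

	int main(void)
	{
		/* Example 1 above: pattern 0b1111, patternbits 4, mask 0b0101 */
		printf("%#llx\n", (unsigned long long)replicate(0xf, 4, 0x5));
		/* prints 0xf0f, i.e. 0b0000 1111 0000 1111 */
		return 0;
	}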
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index d1b54fb52ea6..b3e357777a6e 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -8,6 +8,17 @@
#include "xe_gt_types.h"
+/*
+ * Loop over each DSS whose bit is set in either the geometry or the compute mask
+ * @dss: iterated DSS bit from the DSS mask
+ * @gt: GT structure
+ */
+#define for_each_dss(dss, gt) \
+ for_each_or_bit((dss), \
+ (gt)->fuse_topo.g_dss_mask, \
+ (gt)->fuse_topo.c_dss_mask, \
+ XE_MAX_DSS_FUSE_BITS)
+
struct drm_printer;
void xe_gt_topology_init(struct xe_gt *gt);
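
for_each_dss() walks the set bits of the OR of the geometry and compute DSS
masks, so a DSS enabled for either purpose is visited exactly once. A 16-bit
toy version (fuse values made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t g_dss = 0x0033;        /* geometry-enabled DSS (made up) */
		uint16_t c_dss = 0x00c1;        /* compute-enabled DSS (made up) */
		uint16_t both = g_dss | c_dss;  /* what for_each_or_bit() iterates */

		for (int dss = 0; dss < 16; dss++)
			if (both & (1u << dss))
				printf("dss %d\n", dss);  /* 0,1,4,5,6,7 - each once */
		return 0;
	}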
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 70c615dd1498..cfdc761ff7f4 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -8,6 +8,7 @@
#include "xe_force_wake_types.h"
#include "xe_gt_idle_types.h"
+#include "xe_gt_sriov_pf_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_reg_sr_types.h"
@@ -24,11 +25,15 @@ enum xe_gt_type {
XE_GT_TYPE_MEDIA,
};
-#define XE_MAX_DSS_FUSE_REGS 3
-#define XE_MAX_EU_FUSE_REGS 1
+#define XE_MAX_DSS_FUSE_REGS 3
+#define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
+#define XE_MAX_EU_FUSE_REGS 1
+#define XE_MAX_EU_FUSE_BITS (32 * XE_MAX_EU_FUSE_REGS)
+#define XE_MAX_L3_BANK_MASK_BITS 64
-typedef unsigned long xe_dss_mask_t[BITS_TO_LONGS(32 * XE_MAX_DSS_FUSE_REGS)];
-typedef unsigned long xe_eu_mask_t[BITS_TO_LONGS(32 * XE_MAX_EU_FUSE_REGS)];
+typedef unsigned long xe_dss_mask_t[BITS_TO_LONGS(XE_MAX_DSS_FUSE_BITS)];
+typedef unsigned long xe_eu_mask_t[BITS_TO_LONGS(XE_MAX_EU_FUSE_BITS)];
+typedef unsigned long xe_l3_bank_mask_t[BITS_TO_LONGS(XE_MAX_L3_BANK_MASK_BITS)];
struct xe_mmio_range {
u32 start;
@@ -138,6 +143,12 @@ struct xe_gt {
u32 adj_offset;
} mmio;
+ /** @sriov: virtualization data related to GT */
+ union {
+ /** @sriov.pf: PF data. Valid only if driver is running as PF */
+ struct xe_gt_sriov_pf pf;
+ } sriov;
+
/**
* @reg_sr: table with registers to be restored on GT init/resume/reset
*/
@@ -177,13 +188,6 @@ struct xe_gt {
 * xe_gt_tlb_fence_timeout after the timeout interval is over.
*/
struct delayed_work fence_tdr;
- /** @tlb_invalidation.fence_context: context for TLB invalidation fences */
- u64 fence_context;
- /**
- * @tlb_invalidation.fence_seqno: seqno to TLB invalidation fences, protected by
- * tlb_invalidation.lock
- */
- u32 fence_seqno;
/** @tlb_invalidation.lock: protects TLB invalidation fences */
spinlock_t lock;
} tlb_invalidation;
@@ -332,6 +336,9 @@ struct xe_gt {
/** @fuse_topo.eu_mask_per_dss: EU mask per DSS*/
xe_eu_mask_t eu_mask_per_dss;
+
+ /** @fuse_topo.l3_bank_mask: L3 bank mask */
+ xe_l3_bank_mask_t l3_bank_mask;
} fuse_topo;
/** @steering: register steering for individual HW units */
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 0d2a2dd13f11..240e7a4bbff1 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -12,11 +12,13 @@
#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_guc_ads.h"
#include "xe_guc_ct.h"
#include "xe_guc_hwconfig.h"
@@ -33,14 +35,13 @@
#include "xe_wa.h"
#include "xe_wopcm.h"
-/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
-#define GUC_GGTT_TOP 0xFEE00000
static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
struct xe_bo *bo)
{
struct xe_device *xe = guc_to_xe(guc);
u32 addr = xe_bo_ggtt_addr(bo);
+ /* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
xe_assert(xe, addr < GUC_GGTT_TOP);
xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);
@@ -133,15 +134,10 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc)
return flags;
}
-#define GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
-
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_uc_fw *uc_fw = &guc->fw;
- struct xe_uc_fw_version *version = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
-
u32 flags = 0;
if (XE_WA(gt, 22012773006))
@@ -164,20 +160,15 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
flags |= GUC_WA_CONTEXT_ISOLATION;
- if ((XE_WA(gt, 16015675438) || XE_WA(gt, 18020744125)) &&
+ if (XE_WA(gt, 18020744125) &&
!xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
if (XE_WA(gt, 1509372804))
flags |= GUC_WA_RENDER_RST_RC6_EXIT;
- if (XE_WA(gt, 14018913170)) {
- if (GUC_VER(version->major, version->minor, version->patch) >= GUC_VER(70, 7, 0))
- flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
- else
- drm_dbg(&xe->drm, "Skip WA 14018913170: GUC version expected >= 70.7.0, found %u.%u.%u\n",
- version->major, version->minor, version->patch);
- }
+ if (XE_WA(gt, 14018913170))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
return flags;
}
@@ -189,15 +180,23 @@ static u32 guc_ctl_devid(struct xe_guc *guc)
return (((u32)xe->info.devid) << 16) | xe->info.revid;
}
-static void guc_init_params(struct xe_guc *guc)
+static void guc_print_params(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
u32 *params = guc->params;
int i;
BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ xe_gt_dbg(gt, "GuC param[%2d] = 0x%08x\n", i, params[i]);
+}
+
+static void guc_init_params(struct xe_guc *guc)
+{
+ u32 *params = guc->params;
+
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_FEATURE] = 0;
params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
@@ -205,18 +204,12 @@ static void guc_init_params(struct xe_guc *guc)
params[GUC_CTL_WA] = 0;
params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
- for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
+ guc_print_params(guc);
}
static void guc_init_params_post_hwconfig(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
u32 *params = guc->params;
- int i;
-
- BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
- BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
@@ -225,8 +218,7 @@ static void guc_init_params_post_hwconfig(struct xe_guc *guc)
params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
- for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
+ guc_print_params(guc);
}
/*
@@ -250,10 +242,11 @@ static void guc_write_params(struct xe_guc *guc)
static void guc_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
+ struct xe_gt *gt = guc_to_gt(guc);
- xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+ xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
xe_uc_fini_hw(&guc_to_gt(guc)->uc);
- xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
}
/**
@@ -330,7 +323,7 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
- ret = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, guc_fini, guc);
+ ret = drmm_add_action_or_reset(&xe->drm, guc_fini, guc);
if (ret)
goto out;
@@ -343,7 +336,7 @@ int xe_guc_init(struct xe_guc *guc)
return 0;
out:
- drm_err(&xe->drm, "GuC init failed with %d", ret);
+ xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
return ret;
}
@@ -380,7 +373,6 @@ int xe_guc_post_load_init(struct xe_guc *guc)
int xe_guc_reset(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
u32 guc_status, gdrst;
int ret;
@@ -391,16 +383,14 @@ int xe_guc_reset(struct xe_guc *guc)
ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
if (ret) {
- drm_err(&xe->drm, "GuC reset timed out, GDRST=0x%8x\n",
- gdrst);
+ xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
goto err_out;
}
guc_status = xe_mmio_read32(gt, GUC_STATUS);
if (!(guc_status & GS_MIA_IN_RESET)) {
- drm_err(&xe->drm,
- "GuC status: 0x%x, MIA core expected to be in reset\n",
- guc_status);
+ xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
+ guc_status);
ret = -EIO;
goto err_out;
}
@@ -463,7 +453,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
static int guc_wait_ucode(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
u32 status;
int ret;
@@ -484,35 +474,32 @@ static int guc_wait_ucode(struct xe_guc *guc)
* 200ms. Even at slowest clock, this should be sufficient. And
* in the working case, a larger timeout makes no difference.
*/
- ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
+ ret = xe_mmio_wait32(gt, GUC_STATUS, GS_UKERNEL_MASK,
FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
200000, &status, false);
if (ret) {
- struct drm_device *drm = &xe->drm;
-
- drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_info(drm, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
- REG_FIELD_GET(GS_MIA_IN_RESET, status),
- REG_FIELD_GET(GS_BOOTROM_MASK, status),
- REG_FIELD_GET(GS_UKERNEL_MASK, status),
- REG_FIELD_GET(GS_MIA_MASK, status),
- REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
+ xe_gt_info(gt, "GuC load failed: status = 0x%08X\n", status);
+ xe_gt_info(gt, "GuC status: Reset = %u, BootROM = %#X, UKernel = %#X, MIA = %#X, Auth = %#X\n",
+ REG_FIELD_GET(GS_MIA_IN_RESET, status),
+ REG_FIELD_GET(GS_BOOTROM_MASK, status),
+ REG_FIELD_GET(GS_UKERNEL_MASK, status),
+ REG_FIELD_GET(GS_MIA_MASK, status),
+ REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_info(drm, "GuC firmware signature verification failed\n");
+ xe_gt_info(gt, "GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
if (REG_FIELD_GET(GS_UKERNEL_MASK, status) ==
XE_GUC_LOAD_STATUS_EXCEPTION) {
- drm_info(drm, "GuC firmware exception. EIP: %#x\n",
- xe_mmio_read32(guc_to_gt(guc),
- SOFT_SCRATCH(13)));
+ xe_gt_info(gt, "GuC firmware exception. EIP: %#x\n",
+ xe_mmio_read32(gt, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
} else {
- drm_dbg(&xe->drm, "GuC successfully loaded");
+ xe_gt_dbg(gt, "GuC successfully loaded\n");
}
return ret;
@@ -604,6 +591,9 @@ static void guc_handle_mmio_msg(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
u32 msg;
+ if (IS_SRIOV_VF(guc_to_xe(guc)))
+ return;
+
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
@@ -612,12 +602,10 @@ static void guc_handle_mmio_msg(struct xe_guc *guc)
xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);
if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
- drm_err(&guc_to_xe(guc)->drm,
- "Received early GuC crash dump notification!\n");
+ xe_gt_err(gt, "Received early GuC crash dump notification!\n");
if (msg & XE_GUC_RECV_MSG_EXCEPTION)
- drm_err(&guc_to_xe(guc)->drm,
- "Received early GuC exception notification!\n");
+ xe_gt_err(gt, "Received early GuC exception notification!\n");
}
static void guc_enable_irq(struct xe_guc *guc)
@@ -668,15 +656,15 @@ int xe_guc_enable_communication(struct xe_guc *guc)
int xe_guc_suspend(struct xe_guc *guc)
{
- int ret;
+ struct xe_gt *gt = guc_to_gt(guc);
u32 action[] = {
XE_GUC_ACTION_CLIENT_SOFT_RESET,
};
+ int ret;
ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
if (ret) {
- drm_err(&guc_to_xe(guc)->drm,
- "GuC suspend: CLIENT_SOFT_RESET fail: %d!\n", ret);
+ xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret));
return ret;
}
@@ -751,8 +739,8 @@ retry:
50000, &reply, false);
if (ret) {
timeout:
- drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
- request[0], reply);
+ xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
+ request[0], reply);
return ret;
}
@@ -790,8 +778,8 @@ timeout:
GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
- drm_dbg(&xe->drm, "mmio request %#x: retrying, reason %#x\n",
- request[0], reason);
+ xe_gt_dbg(gt, "GuC mmio request %#x: retrying, reason %#x\n",
+ request[0], reason);
goto retry;
}
@@ -800,16 +788,16 @@ timeout:
u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
- drm_err(&xe->drm, "mmio request %#x: failure %#x/%#x\n",
- request[0], error, hint);
+ xe_gt_err(gt, "GuC mmio request %#x: failure %#x hint %#x\n",
+ request[0], error, hint);
return -ENXIO;
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
- drm_err(&xe->drm, "mmio request %#x: unexpected reply %#x\n",
- request[0], header);
+ xe_gt_err(gt, "GuC mmio request %#x: unexpected reply %#x\n",
+ request[0], header);
return -EPROTO;
}
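
The load-failure path above decodes GUC_STATUS with REG_FIELD_GET(). The field
extraction itself is just mask-and-shift; a standalone demo with made-up field
positions (the real GS_* masks live in the register headers):

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative masks only -- the real GS_* layout is in xe_guc_regs.h. */
	#define DEMO_BOOTROM_MASK 0x000000f0u
	#define DEMO_UKERNEL_MASK 0x0000ff00u

	/* Same idea as REG_FIELD_GET(): mask, then shift down by the position
	 * of the mask's lowest set bit. */
	static uint32_t field_get(uint32_t mask, uint32_t val)
	{
		return (val & mask) / (mask & -mask);
	}

	int main(void)
	{
		uint32_t status = 0x00007120;   /* pretend GUC_STATUS readback */

		printf("BootROM = %#x, UKernel = %#x\n",
		       field_get(DEMO_BOOTROM_MASK, status),
		       field_get(DEMO_UKERNEL_MASK, status));
		return 0;
	}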
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 6ad4c1a90a78..1aafa486edec 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -7,6 +7,8 @@
#include <drm/drm_managed.h>
+#include <generated/xe_wa_oob.h>
+
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
@@ -19,6 +21,7 @@
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
+#include "xe_wa.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -80,6 +83,10 @@ ads_to_map(struct xe_guc_ads *ads)
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
+ * | w/a KLVs |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
* | capture lists |
* +---------------------------------------+
* | padding |
@@ -131,6 +138,11 @@ static size_t guc_ads_golden_lrc_size(struct xe_guc_ads *ads)
return PAGE_ALIGN(ads->golden_lrc_size);
}
+static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
+{
+ return PAGE_ALIGN(ads->ads_waklv_size);
+}
+
static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
{
/* FIXME: Allocate a proper capture list */
@@ -167,12 +179,22 @@ static size_t guc_ads_golden_lrc_offset(struct xe_guc_ads *ads)
return PAGE_ALIGN(offset);
}
+static size_t guc_ads_waklv_offset(struct xe_guc_ads *ads)
+{
+ u32 offset;
+
+ offset = guc_ads_golden_lrc_offset(ads) +
+ guc_ads_golden_lrc_size(ads);
+
+ return PAGE_ALIGN(offset);
+}
+
static size_t guc_ads_capture_offset(struct xe_guc_ads *ads)
{
size_t offset;
- offset = guc_ads_golden_lrc_offset(ads) +
- guc_ads_golden_lrc_size(ads);
+ offset = guc_ads_waklv_offset(ads) +
+ guc_ads_waklv_size(ads);
return PAGE_ALIGN(offset);
}
@@ -260,6 +282,110 @@ static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
return total_size;
}
+static void guc_waklv_enable_one_word(struct xe_guc_ads *ads,
+ enum xe_guc_klv_ids klv_id,
+ u32 value,
+ u32 *offset, u32 *remain)
+{
+ u32 size;
+ u32 klv_entry[] = {
+ /* 16:16 key/length */
+ FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
+ FIELD_PREP(GUC_KLV_0_LEN, 1),
+ value,
+ /* 1 dword data */
+ };
+
+ size = sizeof(klv_entry);
+
+ if (*remain < size) {
+ drm_warn(&ads_to_xe(ads)->drm,
+ "w/a klv buffer too small to add klv id %d\n", klv_id);
+ } else {
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
+ klv_entry, size);
+ *offset += size;
+ *remain -= size;
+ }
+}
+
+static void guc_waklv_enable_simple(struct xe_guc_ads *ads,
+ enum xe_guc_klv_ids klv_id, u32 *offset, u32 *remain)
+{
+ u32 klv_entry[] = {
+ /* 16:16 key/length */
+ FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
+ FIELD_PREP(GUC_KLV_0_LEN, 0),
+ /* 0 dwords data */
+ };
+ u32 size;
+
+ size = sizeof(klv_entry);
+
+ if (xe_gt_WARN(ads_to_gt(ads), *remain < size,
+ "w/a klv buffer too small to add klv id %d\n", klv_id))
+ return;
+
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
+ klv_entry, size);
+ *offset += size;
+ *remain -= size;
+}
+
+static void guc_waklv_init(struct xe_guc_ads *ads)
+{
+ struct xe_gt *gt = ads_to_gt(ads);
+ u64 addr_ggtt;
+ u32 offset, remain, size;
+
+ offset = guc_ads_waklv_offset(ads);
+ remain = guc_ads_waklv_size(ads);
+
+ if (XE_WA(gt, 14019882105))
+ guc_waklv_enable_simple(ads,
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
+ &offset, &remain);
+ if (XE_WA(gt, 18024947630))
+ guc_waklv_enable_simple(ads,
+ GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING,
+ &offset, &remain);
+ if (XE_WA(gt, 16022287689))
+ guc_waklv_enable_simple(ads,
+ GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
+ &offset, &remain);
+
+ /*
+	 * On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
+	 * the default value for this register is 0xC40. This could change in the future, so the
+	 * GuC depends on the KMD to send it the correct value.
+ */
+ if (XE_WA(gt, 13011645652))
+ guc_waklv_enable_one_word(ads,
+ GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE,
+ 0xC40,
+ &offset, &remain);
+
+ size = guc_ads_waklv_size(ads) - remain;
+ if (!size)
+ return;
+
+ offset = guc_ads_waklv_offset(ads);
+ addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+
+ ads_blob_write(ads, ads.wa_klv_addr_lo, lower_32_bits(addr_ggtt));
+ ads_blob_write(ads, ads.wa_klv_addr_hi, upper_32_bits(addr_ggtt));
+ ads_blob_write(ads, ads.wa_klv_size, size);
+}
+
+static int calculate_waklv_size(struct xe_guc_ads *ads)
+{
+ /*
+	 * A single page is both the minimum allocation size and
+	 * sufficiently large for all current platforms.
+ */
+ return SZ_4K;
+}
+
#define MAX_GOLDEN_LRC_SIZE (SZ_4K * 64)
int xe_guc_ads_init(struct xe_guc_ads *ads)
@@ -271,10 +397,12 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
ads->golden_lrc_size = calculate_golden_lrc_size(ads);
ads->regset_size = calculate_regset_size(gt);
+ ads->ads_waklv_size = calculate_waklv_size(ads);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -597,6 +725,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
guc_mapping_table_init(gt, &info_map);
guc_capture_list_init(ads);
guc_doorbell_init(ads);
+ guc_waklv_init(ads);
if (xe->info.has_usm) {
guc_um_init_params(ads);
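
guc_waklv_enable_one_word() above emits a KLV as a 16-bit key packed above a
16-bit dword count, followed by the payload. A standalone dump of one such
entry (the key value is made up; real IDs come from the GuC KLV ABI):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t key = 0x9999;   /* illustrative KLV id, not a real one */
		uint16_t len = 1;        /* one dword of payload */
		uint32_t klv[2] = { (uint32_t)key << 16 | len, 0xC40 };

		/* header 0x99990001 then payload 0x00000c40, 8 bytes total */
		printf("klv[0]=%#010x klv[1]=%#010x\n",
		       (unsigned int)klv[0], (unsigned int)klv[1]);
		return 0;
	}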
diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h
index 4afe44bece4b..2de5decfe0fd 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h
@@ -20,6 +20,8 @@ struct xe_guc_ads {
size_t golden_lrc_size;
/** @regset_size: size of register set passed to GuC for save/restore */
u32 regset_size;
+ /** @ads_waklv_size: total waklv size supported by platform */
+ u32 ads_waklv_size;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 355edd4d758a..8ac819a7061e 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -21,6 +21,7 @@
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_relay.h"
@@ -143,20 +144,24 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
struct xe_bo *bo;
int err;
- xe_assert(xe, !(guc_ct_size() % PAGE_SIZE));
+ xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
- drmm_mutex_init(&xe->drm, &ct->lock);
spin_lock_init(&ct->fast_lock);
xa_init(&ct->fence_lookup);
INIT_WORK(&ct->g2h_worker, g2h_worker_func);
init_waitqueue_head(&ct->wq);
init_waitqueue_head(&ct->g2h_fence_wq);
+ err = drmm_mutex_init(&xe->drm, &ct->lock);
+ if (err)
+ return err;
+
primelockdep(ct);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -166,7 +171,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
if (err)
return err;
- xe_assert(xe, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
@@ -313,9 +318,10 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
int err;
- xe_assert(xe, !xe_guc_ct_enabled(ct));
+ xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
@@ -336,12 +342,12 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
smp_mb();
wake_up_all(&ct->wq);
- drm_dbg(&xe->drm, "GuC CT communication channel enabled\n");
+ xe_gt_dbg(gt, "GuC CT communication channel enabled\n");
return 0;
err_out:
- drm_err(&xe->drm, "Failed to enable CT (%d)\n", err);
+ xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
return err;
}
@@ -422,7 +428,7 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
- xe_assert(ct_to_xe(ct), g2h_len <= ct->ctbs.g2h.info.space);
+ xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
if (g2h_len) {
lockdep_assert_held(&ct->fast_lock);
@@ -435,8 +441,8 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
lockdep_assert_held(&ct->fast_lock);
- xe_assert(ct_to_xe(ct), ct->ctbs.g2h.info.space + g2h_len <=
- ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
+ xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
ct->ctbs.g2h.info.space += g2h_len;
--ct->g2h_outstanding;
@@ -455,6 +461,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 ct_fence_value, bool want_response)
{
struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
struct guc_ctb *h2g = &ct->ctbs.h2g;
u32 cmd[H2G_CT_HEADERS];
u32 tail = h2g->info.tail;
@@ -465,8 +472,8 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
full_len = len + GUC_CTB_HDR_LEN;
lockdep_assert_held(&ct->lock);
- xe_assert(xe, full_len <= GUC_CTB_MSG_MAX_LEN);
- xe_assert(xe, tail <= h2g->info.size);
+ xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
+ xe_gt_assert(gt, tail <= h2g->info.size);
/* Command will wrap, zero fill (NOPs), return and check credits again */
if (tail + full_len > h2g->info.size) {
@@ -515,7 +522,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
/* Update descriptor */
desc_write(xe, h2g, tail, h2g->info.tail);
- trace_xe_guc_ctb_h2g(ct_to_gt(ct)->info.id, *(action - 1), full_len,
+ trace_xe_guc_ctb_h2g(gt->info.id, *(action - 1), full_len,
desc_read(xe, h2g, head), h2g->info.tail);
return 0;
@@ -544,15 +551,15 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
- struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
u16 seqno;
int ret;
- xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
- xe_assert(xe, !g2h_len || !g2h_fence);
- xe_assert(xe, !num_g2h || !g2h_fence);
- xe_assert(xe, !g2h_len || num_g2h);
- xe_assert(xe, g2h_len || !num_g2h);
+ xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, !g2h_len || !g2h_fence);
+ xe_gt_assert(gt, !num_g2h || !g2h_fence);
+ xe_gt_assert(gt, !g2h_len || num_g2h);
+ xe_gt_assert(gt, g2h_len || !num_g2h);
lockdep_assert_held(&ct->lock);
if (unlikely(ct->ctbs.h2g.info.broken)) {
@@ -570,7 +577,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
goto out;
}
- xe_assert(xe, xe_guc_ct_enabled(ct));
+ xe_gt_assert(gt, xe_guc_ct_enabled(ct));
if (g2h_fence) {
g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
@@ -628,12 +635,12 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
- struct drm_device *drm = &ct_to_xe(ct)->drm;
- struct drm_printer p = drm_info_printer(drm->dev);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct drm_printer p = xe_gt_info_printer(gt);
unsigned int sleep_period_ms = 1;
int ret;
- xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
+ xe_gt_assert(gt, !g2h_len || !g2h_fence);
lockdep_assert_held(&ct->lock);
xe_device_assert_mem_access(ct_to_xe(ct));
@@ -691,7 +698,7 @@ try_again:
return ret;
broken:
- drm_err(drm, "No forward process on H2G, reset required");
+ xe_gt_err(gt, "No forward process on H2G, reset required\n");
xe_guc_ct_print(ct, &p, true);
ct->ctbs.h2g.info.broken = true;
@@ -703,7 +710,7 @@ static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
{
int ret;
- xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
+ xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
mutex_lock(&ct->lock);
ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
@@ -771,7 +778,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buffer, bool no_fail)
{
- struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
struct g2h_fence g2h_fence;
int ret = 0;
@@ -813,20 +820,20 @@ retry_same_fence:
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
if (!ret) {
- drm_err(&xe->drm, "Timed out wait for G2H, fence %u, action %04x",
- g2h_fence.seqno, action[0]);
+ xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x",
+ g2h_fence.seqno, action[0]);
xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
return -ETIME;
}
if (g2h_fence.retry) {
- drm_warn(&xe->drm, "Send retry, action 0x%04x, reason %d",
- action[0], g2h_fence.reason);
+ xe_gt_warn(gt, "H2G retry, action 0x%04x, reason %u",
+ action[0], g2h_fence.reason);
goto retry;
}
if (g2h_fence.fail) {
- drm_err(&xe->drm, "Send failed, action 0x%04x, error %d, hint %d",
- action[0], g2h_fence.error, g2h_fence.hint);
+ xe_gt_err(gt, "H2G send failed, action 0x%04x, error %d, hint %u",
+ action[0], g2h_fence.error, g2h_fence.hint);
ret = -EIO;
}
@@ -895,7 +902,6 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
- struct xe_device *xe = gt_to_xe(gt);
u32 *hxg = msg_to_hxg(msg);
u32 hxg_len = msg_len_to_hxg_len(len);
u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
@@ -933,7 +939,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
return 0;
}
- xe_assert(xe, fence == g2h_fence->seqno);
+ xe_gt_assert(gt, fence == g2h_fence->seqno);
if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
g2h_fence->fail = true;
@@ -961,7 +967,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
u32 *hxg = msg_to_hxg(msg);
u32 origin, type;
int ret;
@@ -970,9 +976,8 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
- drm_err(&xe->drm,
- "G2H channel broken on read, origin=%d, reset required\n",
- origin);
+ xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
+ origin);
ct->ctbs.g2h.info.broken = true;
return -EPROTO;
@@ -989,9 +994,8 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
ret = parse_g2h_response(ct, msg, len);
break;
default:
- drm_err(&xe->drm,
- "G2H channel broken on read, type=%d, reset required\n",
- type);
+ xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
+ type);
ct->ctbs.g2h.info.broken = true;
ret = -EOPNOTSUPP;
@@ -1002,8 +1006,8 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- struct xe_device *xe = ct_to_xe(ct);
struct xe_guc *guc = ct_to_guc(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
u32 hxg_len = msg_len_to_hxg_len(len);
u32 *hxg = msg_to_hxg(msg);
u32 action, adj_len;
@@ -1054,18 +1058,21 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
adj_len);
break;
case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
- ret = xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
break;
case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
- ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
+ break;
+ case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
+ ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
break;
default:
- drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
+ xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
if (ret)
- drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
- action, ret);
+ xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
+ action, ERR_PTR(ret));
return 0;
}
@@ -1073,13 +1080,14 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
{
struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
struct guc_ctb *g2h = &ct->ctbs.g2h;
u32 tail, head, len;
s32 avail;
u32 action;
u32 *hxg;
- xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
lockdep_assert_held(&ct->fast_lock);
if (ct->state == XE_GUC_CT_STATE_DISABLED)
@@ -1091,7 +1099,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
if (g2h->info.broken)
return -EPIPE;
- xe_assert(xe, xe_guc_ct_enabled(ct));
+ xe_gt_assert(gt, xe_guc_ct_enabled(ct));
/* Calculate DW available to read */
tail = desc_read(xe, g2h, tail);
@@ -1107,9 +1115,8 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
sizeof(u32));
len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
if (len > avail) {
- drm_err(&xe->drm,
- "G2H channel broken on read, avail=%d, len=%d, reset required\n",
- avail, len);
+ xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
+ avail, len);
g2h->info.broken = true;
return -EPROTO;
@@ -1162,7 +1169,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
struct xe_guc *guc = ct_to_guc(ct);
u32 hxg_len = msg_len_to_hxg_len(len);
u32 *hxg = msg_to_hxg(msg);
@@ -1181,12 +1188,12 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
adj_len);
break;
default:
- drm_warn(&xe->drm, "NOT_POSSIBLE");
+ xe_gt_warn(gt, "NOT_POSSIBLE");
}
if (ret)
- drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
- action, ret);
+ xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
+ action, ERR_PTR(ret));
}
/**
@@ -1203,7 +1210,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
bool ongoing;
int len;
- ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+ ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
return;
@@ -1216,7 +1223,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
spin_unlock(&ct->fast_lock);
if (ongoing)
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
}
/* Returns less than zero on error, 0 on done, 1 on more available */
@@ -1247,6 +1254,7 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct)
static void g2h_worker_func(struct work_struct *w)
{
struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
+ struct xe_gt *gt = ct_to_gt(ct);
bool ongoing;
int ret;
@@ -1273,7 +1281,7 @@ static void g2h_worker_func(struct work_struct *w)
* responses, if the worker here is blocked on those callbacks
* completing, creating a deadlock.
*/
- ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+ ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
return;
@@ -1283,8 +1291,7 @@ static void g2h_worker_func(struct work_struct *w)
mutex_unlock(&ct->lock);
if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
- struct drm_device *drm = &ct_to_xe(ct)->drm;
- struct drm_printer p = drm_info_printer(drm->dev);
+ struct drm_printer p = xe_gt_info_printer(gt);
xe_guc_ct_print(ct, &p, false);
kick_reset(ct);
@@ -1292,7 +1299,7 @@ static void g2h_worker_func(struct work_struct *w)
} while (ret == 1);
if (ongoing)
- xe_device_mem_access_put(ct_to_xe(ct));
+ xe_pm_runtime_put(ct_to_xe(ct));
}
static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
@@ -1394,7 +1401,7 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
return NULL;
}
- if (xe_guc_ct_enabled(ct)) {
+ if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
snapshot->ct_enabled = true;
snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
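
h2g_write() (unchanged context above) never lets a message straddle the ring
end: it zero-fills the tail with NOPs and restarts at offset 0. A toy model of
that wrap rule with made-up sizes:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t size = 1024;       /* ring size in dwords (made up) */
		uint32_t tail = 1020;       /* current write offset */
		uint32_t full_len = 8;      /* message incl. CTB header */

		/* the message would straddle the end: pad with zeros (NOPs)
		 * and restart the write at offset 0 */
		if (tail + full_len > size) {
			printf("zero-fill %u dwords, wrap to 0\n", size - tail);
			tail = 0;
		}
		tail += full_len;
		printf("new tail = %u\n", tail);
		return 0;
	}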
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index ffd7d53bcc42..d3822cbea273 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -14,6 +14,7 @@
#include "xe_guc_ct.h"
#include "xe_guc_log.h"
#include "xe_macros.h"
+#include "xe_pm.h"
static struct xe_guc *node_to_guc(struct drm_info_node *node)
{
@@ -26,9 +27,9 @@ static int guc_info(struct seq_file *m, void *data)
struct xe_device *xe = guc_to_xe(guc);
struct drm_printer p = drm_seq_file_printer(m);
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
xe_guc_print_info(guc, &p);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
@@ -39,9 +40,9 @@ static int guc_log(struct seq_file *m, void *data)
struct xe_device *xe = guc_to_xe(guc);
struct drm_printer p = drm_seq_file_printer(m);
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
xe_guc_log_print(&guc->log, &p);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index c281fdbfd2d6..19ee71aeaf17 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -14,6 +14,8 @@
#define G2H_LEN_DW_DEREGISTER_CONTEXT 3
#define G2H_LEN_DW_TLB_INVALIDATE 3
+#define GUC_ID_MAX 65535
+
#define GUC_CONTEXT_DISABLE 0
#define GUC_CONTEXT_ENABLE 1
@@ -207,7 +209,10 @@ struct guc_ads {
u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
- u32 reserved[14];
+ u32 wa_klv_addr_lo;
+ u32 wa_klv_addr_hi;
+ u32 wa_klv_size;
+ u32 reserved[11];
} __packed;
/* Engine usage stats */
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
index ea49f3885c10..d9b570a154a2 100644
--- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
@@ -14,7 +14,7 @@
#include "xe_guc.h"
#include "xe_map.h"
-static int send_get_hwconfig(struct xe_guc *guc, u32 ggtt_addr, u32 size)
+static int send_get_hwconfig(struct xe_guc *guc, u64 ggtt_addr, u32 size)
{
u32 action[] = {
XE_GUC_ACTION_GET_HWCONFIG,
@@ -78,8 +78,9 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
return -EINVAL;
bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
guc->hwconfig.bo = bo;
diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.c b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
new file mode 100644
index 000000000000..0fb7c6b78c31
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_id_mgr.h"
+#include "xe_guc_types.h"
+
+static struct xe_guc *idm_to_guc(struct xe_guc_id_mgr *idm)
+{
+ return container_of(idm, struct xe_guc, submission_state.idm);
+}
+
+static struct xe_gt *idm_to_gt(struct xe_guc_id_mgr *idm)
+{
+ return guc_to_gt(idm_to_guc(idm));
+}
+
+static struct xe_device *idm_to_xe(struct xe_guc_id_mgr *idm)
+{
+ return gt_to_xe(idm_to_gt(idm));
+}
+
+#define idm_assert(idm, cond) xe_gt_assert(idm_to_gt(idm), cond)
+#define idm_mutex(idm) (&idm_to_guc(idm)->submission_state.lock)
+
+static void idm_print_locked(struct xe_guc_id_mgr *idm, struct drm_printer *p, int indent);
+
+static void __fini_idm(struct drm_device *drm, void *arg)
+{
+ struct xe_guc_id_mgr *idm = arg;
+
+ mutex_lock(idm_mutex(idm));
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ unsigned int weight = bitmap_weight(idm->bitmap, idm->total);
+
+ if (weight) {
+ struct drm_printer p = xe_gt_info_printer(idm_to_gt(idm));
+
+ xe_gt_err(idm_to_gt(idm), "GUC ID manager unclean (%u/%u)\n",
+ weight, idm->total);
+ idm_print_locked(idm, &p, 1);
+ }
+ }
+
+ bitmap_free(idm->bitmap);
+ idm->bitmap = NULL;
+ idm->total = 0;
+ idm->used = 0;
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+/**
+ * xe_guc_id_mgr_init() - Initialize GuC context ID Manager.
+ * @idm: the &xe_guc_id_mgr to initialize
+ * @limit: number of IDs to manage
+ *
+ * The bare-metal or PF driver can pass ~0 as @limit to indicate that all
+ * context IDs supported by the GuC firmware are available for use.
+ *
+ * Only VF drivers must provide the explicit number of context IDs that
+ * they are allowed to use.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int limit)
+{
+ int ret;
+
+ idm_assert(idm, !idm->bitmap);
+ idm_assert(idm, !idm->total);
+ idm_assert(idm, !idm->used);
+
+ if (limit == ~0)
+ limit = GUC_ID_MAX;
+ else if (limit > GUC_ID_MAX)
+ return -ERANGE;
+ else if (!limit)
+ return -EINVAL;
+
+ idm->bitmap = bitmap_zalloc(limit, GFP_KERNEL);
+ if (!idm->bitmap)
+ return -ENOMEM;
+ idm->total = limit;
+
+ ret = drmm_add_action_or_reset(&idm_to_xe(idm)->drm, __fini_idm, idm);
+ if (ret)
+ return ret;
+
+ xe_gt_info(idm_to_gt(idm), "using %u GUC ID(s)\n", idm->total);
+ return 0;
+}
+
+static unsigned int find_last_zero_area(unsigned long *bitmap,
+ unsigned int total,
+ unsigned int count)
+{
+ unsigned int found = total;
+ unsigned int rs, re, range;
+
+ for_each_clear_bitrange(rs, re, bitmap, total) {
+ range = re - rs;
+ if (range < count)
+ continue;
+ found = rs + (range - count);
+ }
+ return found;
+}
+
+static int idm_reserve_chunk_locked(struct xe_guc_id_mgr *idm,
+ unsigned int count, unsigned int retain)
+{
+ int id;
+
+ idm_assert(idm, count);
+ lockdep_assert_held(idm_mutex(idm));
+
+ if (!idm->total)
+ return -ENODATA;
+
+ if (retain) {
+ /*
+ * For IDs reservations (used on PF for VFs) we want to make
+ * sure there will be at least 'retain' available for the PF
+ */
+ if (idm->used + count + retain > idm->total)
+ return -EDQUOT;
+ /*
+ * ... and we want to reserve highest IDs close to the end.
+ */
+ id = find_last_zero_area(idm->bitmap, idm->total, count);
+ } else {
+ /*
+ * For regular IDs reservations (used by submission code)
+ * we start searching from the lower range of IDs.
+ */
+ id = bitmap_find_next_zero_area(idm->bitmap, idm->total, 0, count, 0);
+ }
+ if (id >= idm->total)
+ return -ENOSPC;
+
+ bitmap_set(idm->bitmap, id, count);
+ idm->used += count;
+
+ return id;
+}
+
+static void idm_release_chunk_locked(struct xe_guc_id_mgr *idm,
+ unsigned int start, unsigned int count)
+{
+ idm_assert(idm, count);
+ idm_assert(idm, count <= idm->used);
+ idm_assert(idm, start < idm->total);
+ idm_assert(idm, start + count - 1 < idm->total);
+ lockdep_assert_held(idm_mutex(idm));
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ unsigned int n;
+
+ for (n = 0; n < count; n++)
+ idm_assert(idm, test_bit(start + n, idm->bitmap));
+ }
+ bitmap_clear(idm->bitmap, start, count);
+ idm->used -= count;
+}
+
+/**
+ * xe_guc_id_mgr_reserve_locked() - Reserve one or more GuC context IDs.
+ * @idm: the &xe_guc_id_mgr
+ * @count: number of IDs to allocate (can't be 0)
+ *
+ * This function is intended for use by the GuC submission code, which
+ * already holds the submission lock.
+ *
+ * Return: ID of allocated GuC context or a negative error code on failure.
+ */
+int xe_guc_id_mgr_reserve_locked(struct xe_guc_id_mgr *idm, unsigned int count)
+{
+ return idm_reserve_chunk_locked(idm, count, 0);
+}
+
+/**
+ * xe_guc_id_mgr_release_locked() - Release one or more GuC context IDs.
+ * @idm: the &xe_guc_id_mgr
+ * @id: the GuC context ID to release
+ * @count: number of IDs to release (can't be 0)
+ *
+ * This function is intended for use by the GuC submission code, which
+ * already holds the submission lock.
+ */
+void xe_guc_id_mgr_release_locked(struct xe_guc_id_mgr *idm, unsigned int id,
+ unsigned int count)
+{
+ return idm_release_chunk_locked(idm, id, count);
+}
+
+/**
+ * xe_guc_id_mgr_reserve() - Reserve a range of GuC context IDs.
+ * @idm: the &xe_guc_id_mgr
+ * @count: number of GuC context IDs to reserve (can't be 0)
+ * @retain: number of GuC context IDs to keep available (can't be 0)
+ *
+ * This function is intended for use by the PF driver, which expects the
+ * reserved range of IDs to be contiguous and at least @retain IDs to
+ * remain available for the PF after this reservation.
+ *
+ * Return: starting ID of the allocated GuC context ID range or
+ * a negative error code on failure.
+ */
+int xe_guc_id_mgr_reserve(struct xe_guc_id_mgr *idm,
+ unsigned int count, unsigned int retain)
+{
+ int ret;
+
+ idm_assert(idm, count);
+ idm_assert(idm, retain);
+
+ mutex_lock(idm_mutex(idm));
+ ret = idm_reserve_chunk_locked(idm, count, retain);
+ mutex_unlock(idm_mutex(idm));
+
+ return ret;
+}
+
+/**
+ * xe_guc_id_mgr_release() - Release a range of GuC context IDs.
+ * @idm: the &xe_guc_id_mgr
+ * @start: the starting ID of GuC context range to release
+ * @count: number of GuC context IDs to release
+ */
+void xe_guc_id_mgr_release(struct xe_guc_id_mgr *idm,
+ unsigned int start, unsigned int count)
+{
+ mutex_lock(idm_mutex(idm));
+ idm_release_chunk_locked(idm, start, count);
+ mutex_unlock(idm_mutex(idm));
+}
+
+static void idm_print_locked(struct xe_guc_id_mgr *idm, struct drm_printer *p, int indent)
+{
+ unsigned int rs, re;
+
+ lockdep_assert_held(idm_mutex(idm));
+
+ drm_printf_indent(p, indent, "total %u\n", idm->total);
+ if (!idm->bitmap)
+ return;
+
+ drm_printf_indent(p, indent, "used %u\n", idm->used);
+ for_each_set_bitrange(rs, re, idm->bitmap, idm->total)
+ drm_printf_indent(p, indent, "range %u..%u (%u)\n", rs, re - 1, re - rs);
+}
+
+/**
+ * xe_guc_id_mgr_print() - Print status of GuC ID Manager.
+ * @idm: the &xe_guc_id_mgr to print
+ * @p: the &drm_printer to print to
+ * @indent: tab indentation level
+ */
+void xe_guc_id_mgr_print(struct xe_guc_id_mgr *idm, struct drm_printer *p, int indent)
+{
+ mutex_lock(idm_mutex(idm));
+ idm_print_locked(idm, p, indent);
+ mutex_unlock(idm_mutex(idm));
+}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_id_mgr_test.c"
+#endif
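
find_last_zero_area() picks the highest-placed run of free bits so VF
reservations stay clear of the low IDs used by normal submission. A 64-bit
standalone re-creation with a worked input:

	#include <stdio.h>
	#include <stdint.h>

	/* 64-bit re-creation of find_last_zero_area(): start of the last run
	 * of >= count clear bits, or total when no run is long enough. */
	static unsigned int last_zero_area(uint64_t bm, unsigned int total,
					   unsigned int count)
	{
		unsigned int found = total, run = 0;

		for (unsigned int i = 0; i < total; i++) {
			run = (bm & (1ull << i)) ? 0 : run + 1;
			if (run >= count)
				found = i - count + 1;
		}
		return found;
	}

	int main(void)
	{
		/* bits 0..3 and 10..11 already used; a 4-bit reservation lands
		 * as high as possible, at bit 12 of this 16-bit toy space */
		printf("start = %u\n", last_zero_area(0x0c0f, 16, 4));
		return 0;
	}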
diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.h b/drivers/gpu/drm/xe/xe_guc_id_mgr.h
new file mode 100644
index 000000000000..368f8c80e4c7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ID_MGR_H_
+#define _XE_GUC_ID_MGR_H_
+
+struct drm_printer;
+struct xe_guc_id_mgr;
+
+int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int count);
+
+int xe_guc_id_mgr_reserve_locked(struct xe_guc_id_mgr *idm, unsigned int count);
+void xe_guc_id_mgr_release_locked(struct xe_guc_id_mgr *idm, unsigned int id, unsigned int count);
+
+int xe_guc_id_mgr_reserve(struct xe_guc_id_mgr *idm, unsigned int count, unsigned int retain);
+void xe_guc_id_mgr_release(struct xe_guc_id_mgr *idm, unsigned int start, unsigned int count);
+
+void xe_guc_id_mgr_print(struct xe_guc_id_mgr *idm, struct drm_printer *p, int indent);
+
+#endif
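
The header's reserve/release pair enforces the PF quota implemented in
idm_reserve_chunk_locked(): a reservation fails with -EDQUOT unless @retain
IDs stay free for the PF. A worked check of that rule with illustrative numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int total = 65535;  /* GUC_ID_MAX */
		unsigned int used = 60000, count = 5000, retain = 1000; /* made up */

		if (used + count + retain > total)
			printf("reject with -EDQUOT\n");
		else
			printf("reserve %u IDs near the top of the range\n", count);
		return 0;
	}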
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
new file mode 100644
index 000000000000..ceca949932a0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <drm/drm_print.h>
+
+#include "abi/guc_klvs_abi.h"
+#include "xe_guc_klv_helpers.h"
+
+#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
+
+/**
+ * xe_guc_klv_key_to_string - Convert KLV key into friendly name.
+ * @key: the `GuC KLV`_ key
+ *
+ * Return: name of the KLV key.
+ */
+const char *xe_guc_klv_key_to_string(u16 key)
+{
+ switch (key) {
+ /* VGT POLICY keys */
+ case GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY:
+ return "sched_if_idle";
+ case GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY:
+ return "sample_period";
+ case GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY:
+ return "reset_engine";
+ /* VF CFG keys */
+ case GUC_KLV_VF_CFG_GGTT_START_KEY:
+ return "ggtt_start";
+ case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
+ return "ggtt_size";
+ case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
+ return "lmem_size";
+ case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
+ return "num_contexts";
+ case GUC_KLV_VF_CFG_TILE_MASK_KEY:
+ return "tile_mask";
+ case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
+ return "num_doorbells";
+ case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
+ return "exec_quantum";
+ case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
+ return "preempt_timeout";
+ case GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID_KEY:
+ return "begin_db_id";
+ case GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY:
+ return "begin_ctx_id";
+ default:
+ return "(unknown)";
+ }
+}
+
+/**
+ * xe_guc_klv_print - Print content of the buffer with `GuC KLV`_.
+ * @klvs: the buffer with KLVs
+ * @num_dwords: number of dwords (u32) available in the buffer
+ * @p: the &drm_printer
+ *
+ * The buffer may contain more than one KLV.
+ */
+void xe_guc_klv_print(const u32 *klvs, u32 num_dwords, struct drm_printer *p)
+{
+ while (num_dwords >= GUC_KLV_LEN_MIN) {
+ u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
+ u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
+
+ klvs += GUC_KLV_LEN_MIN;
+ num_dwords -= GUC_KLV_LEN_MIN;
+
+ if (num_dwords < len) {
+ drm_printf(p, "{ key %#06x : truncated %zu of %zu bytes %*ph } # %s\n",
+ key, num_dwords * sizeof(u32), len * sizeof(u32),
+ (int)(num_dwords * sizeof(u32)), klvs,
+ xe_guc_klv_key_to_string(key));
+ return;
+ }
+
+ switch (len) {
+ case 0:
+ drm_printf(p, "{ key %#06x : no value } # %s\n",
+ key, xe_guc_klv_key_to_string(key));
+ break;
+ case 1:
+ drm_printf(p, "{ key %#06x : 32b value %u } # %s\n",
+ key, klvs[0], xe_guc_klv_key_to_string(key));
+ break;
+ case 2:
+ drm_printf(p, "{ key %#06x : 64b value %#llx } # %s\n",
+ key, make_u64(klvs[1], klvs[0]),
+ xe_guc_klv_key_to_string(key));
+ break;
+ default:
+ drm_printf(p, "{ key %#06x : %zu bytes %*ph } # %s\n",
+ key, len * sizeof(u32), (int)(len * sizeof(u32)),
+ klvs, xe_guc_klv_key_to_string(key));
+ break;
+ }
+
+ klvs += len;
+ num_dwords -= len;
+ }
+
+ /* we don't expect any leftovers, fix if KLV header is ever changed */
+ BUILD_BUG_ON(GUC_KLV_LEN_MIN > 1);
+}
+
+/**
+ * xe_guc_klv_count - Count KLVs present in the buffer.
+ * @klvs: the buffer with KLVs
+ * @num_dwords: number of dwords (u32) in the buffer
+ *
+ * Return: number of recognized KLVs or
+ * a negative error code if KLV buffer is truncated.
+ */
+int xe_guc_klv_count(const u32 *klvs, u32 num_dwords)
+{
+ int num_klvs = 0;
+
+ while (num_dwords >= GUC_KLV_LEN_MIN) {
+ u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
+
+ if (num_dwords < len + GUC_KLV_LEN_MIN)
+ break;
+
+ klvs += GUC_KLV_LEN_MIN + len;
+ num_dwords -= GUC_KLV_LEN_MIN + len;
+ num_klvs++;
+ }
+
+ return num_dwords ? -ENODATA : num_klvs;
+}
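
xe_guc_klv_count() walks header dwords (key << 16 | len) and skips len payload
dwords each step. The same walk in a standalone program over a two-KLV buffer
with made-up keys:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* two KLVs: header = key << 16 | len, then len payload dwords;
		 * xe_guc_klv_count() would return 2 here */
		uint32_t klvs[] = { 0x88010001, 0xdeadbeef, /* key 0x8801, 1 dword */
				    0x88020000 };           /* key 0x8802, no data */
		uint32_t n = sizeof(klvs) / sizeof(klvs[0]);
		uint32_t i = 0;
		int count = 0;

		while (i < n) {                  /* a header dword remains */
			uint32_t len = klvs[i] & 0xffff;

			if (i + 1 + len > n)     /* truncated: -ENODATA upstream */
				break;
			i += 1 + len;
			count++;
		}
		printf("%d KLVs\n", count);
		return 0;
	}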
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_helpers.h b/drivers/gpu/drm/xe/xe_guc_klv_helpers.h
new file mode 100644
index 000000000000..b835e0ebe6db
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_klv_helpers.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_KLV_HELPERS_H_
+#define _XE_GUC_KLV_HELPERS_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+
+const char *xe_guc_klv_key_to_string(u16 key);
+
+void xe_guc_klv_print(const u32 *klvs, u32 num_dwords, struct drm_printer *p);
+int xe_guc_klv_count(const u32 *klvs, u32 num_dwords);
+
+/**
+ * PREP_GUC_KLV - Prepare KLV header value based on provided key and len.
+ * @key: KLV key
+ * @len: KLV length
+ *
+ * Return: value of the KLV header (u32).
+ */
+#define PREP_GUC_KLV(key, len) \
+ (FIELD_PREP(GUC_KLV_0_KEY, (key)) | \
+ FIELD_PREP(GUC_KLV_0_LEN, (len)))
+
+/**
+ * PREP_GUC_KLV_CONST - Prepare KLV header value based on const key and len.
+ * @key: const KLV key
+ * @len: const KLV length
+ *
+ * Return: value of the KLV header (u32).
+ */
+#define PREP_GUC_KLV_CONST(key, len) \
+ (FIELD_PREP_CONST(GUC_KLV_0_KEY, (key)) | \
+ FIELD_PREP_CONST(GUC_KLV_0_LEN, (len)))
+
+/**
+ * PREP_GUC_KLV_TAG - Prepare KLV header value based on unique KLV definition tag.
+ * @TAG: unique tag of the KLV definition
+ *
+ * Combine separate KEY and LEN definitions of the KLV identified by the TAG.
+ *
+ * Return: value of the KLV header (u32).
+ */
+#define PREP_GUC_KLV_TAG(TAG) \
+ PREP_GUC_KLV_CONST(GUC_KLV_##TAG##_KEY, GUC_KLV_##TAG##_LEN)
+
+#endif
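
A usage sketch for these macros (hypothetical caller, assuming the paired
GUC_KLV_VF_CFG_GGTT_START_KEY/_LEN definitions in abi/guc_klvs_abi.h):
emitting a 64-bit VF config KLV then takes one compile-time-folded header
dword plus the value dwords:

	static u32 encode_ggtt_start(u32 *cfg, u64 start)
	{
		u32 n = 0;

		/* header dword built from the KLV's KEY/LEN pair */
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
		cfg[n++] = lower_32_bits(start);
		cfg[n++] = upper_32_bits(start);

		return n;	/* number of dwords written */
	}
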
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 45135c3520e5..a37ee3419428 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -84,8 +84,9 @@ int xe_guc_log_init(struct xe_guc_log *log)
struct xe_bo *bo;
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 2839d685631b..509649d0e65e 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -145,25 +145,6 @@ static int pc_action_reset(struct xe_guc_pc *pc)
return ret;
}
-static int pc_action_shutdown(struct xe_guc_pc *pc)
-{
- struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
- int ret;
- u32 action[] = {
- GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
- SLPC_EVENT(SLPC_EVENT_SHUTDOWN, 2),
- xe_bo_ggtt_addr(pc->bo),
- 0,
- };
-
- ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
- drm_err(&pc_to_xe(pc)->drm, "GuC PC shutdown %pe",
- ERR_PTR(ret));
-
- return ret;
-}
-
static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
@@ -381,8 +362,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
struct xe_device *xe = gt_to_xe(gt);
u32 freq;
- xe_device_mem_access_get(gt_to_xe(gt));
-
/* When in RC6, actual frequency reported will be 0. */
if (GRAPHICS_VERx100(xe) >= 1270) {
freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
@@ -394,8 +373,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
freq = decode_freq(freq);
- xe_device_mem_access_put(gt_to_xe(gt));
-
return freq;
}
@@ -412,14 +389,13 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
struct xe_gt *gt = pc_to_gt(pc);
int ret;
- xe_device_mem_access_get(gt_to_xe(gt));
/*
* GuC SLPC plays with cur freq request when GuCRC is enabled
* Block RC6 for a more reliable read.
*/
ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (ret)
- goto out;
+ return ret;
*freq = xe_mmio_read32(gt, RPNSWREQ);
@@ -427,9 +403,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
*freq = decode_freq(*freq);
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-out:
- xe_device_mem_access_put(gt_to_xe(gt));
- return ret;
+ return 0;
}
/**
@@ -451,12 +425,7 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
*/
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
- struct xe_gt *gt = pc_to_gt(pc);
- struct xe_device *xe = gt_to_xe(gt);
-
- xe_device_mem_access_get(xe);
pc_update_rp_values(pc);
- xe_device_mem_access_put(xe);
return pc->rpe_freq;
}
@@ -485,7 +454,6 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
struct xe_gt *gt = pc_to_gt(pc);
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -511,7 +479,6 @@ fw:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -528,7 +495,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -544,8 +510,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
-
return ret;
}
@@ -561,7 +525,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -577,7 +540,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -594,7 +556,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -610,7 +571,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -623,8 +583,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 reg, gt_c_state;
- xe_device_mem_access_get(gt_to_xe(gt));
-
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
@@ -633,8 +591,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
}
- xe_device_mem_access_put(gt_to_xe(gt));
-
switch (gt_c_state) {
case GT_C6:
return GT_IDLE_C6;
@@ -654,9 +610,7 @@ u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 reg;
- xe_device_mem_access_get(gt_to_xe(gt));
reg = xe_mmio_read32(gt, GT_GFX_RC6);
- xe_device_mem_access_put(gt_to_xe(gt));
return reg;
}
@@ -670,9 +624,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u64 reg;
- xe_device_mem_access_get(gt_to_xe(gt));
reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
- xe_device_mem_access_put(gt_to_xe(gt));
return reg;
}
@@ -743,24 +695,28 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
ret = pc_action_query_task_state(pc);
if (ret)
- return ret;
+ goto out;
/*
* GuC defaults to some RPmax that is not actually achievable without
* overclocking. Let's adjust it to the Hardware RP0, which is the
* regular maximum
*/
- if (pc_get_max_freq(pc) > pc->rp0_freq)
- pc_set_max_freq(pc, pc->rp0_freq);
+ if (pc_get_max_freq(pc) > pc->rp0_freq) {
+ ret = pc_set_max_freq(pc, pc->rp0_freq);
+ if (ret)
+ goto out;
+ }
/*
* Same thing happens for Server platforms where min is listed as
* RPMax
*/
if (pc_get_min_freq(pc) > pc->rp0_freq)
- pc_set_min_freq(pc, pc->rp0_freq);
+ ret = pc_set_min_freq(pc, pc->rp0_freq);
- return 0;
+out:
+ return ret;
}
static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
@@ -801,23 +757,19 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
if (xe->info.skip_guc_pc)
return 0;
- xe_device_mem_access_get(pc_to_xe(pc));
-
ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
if (ret)
- goto out;
+ return ret;
ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (ret)
- goto out;
+ return ret;
xe_gt_idle_disable_c6(gt);
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-out:
- xe_device_mem_access_put(pc_to_xe(pc));
- return ret;
+ return 0;
}
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
@@ -870,11 +822,9 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
xe_gt_assert(gt, xe_device_uc_enabled(xe));
- xe_device_mem_access_get(pc_to_xe(pc));
-
ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (ret)
- goto out_fail_force_wake;
+ return ret;
if (xe->info.skip_guc_pc) {
if (xe->info.platform != XE_PVC)
@@ -914,8 +864,6 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
out:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-out_fail_force_wake:
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -926,32 +874,17 @@ out_fail_force_wake:
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
struct xe_device *xe = pc_to_xe(pc);
- int ret;
-
- xe_device_mem_access_get(pc_to_xe(pc));
if (xe->info.skip_guc_pc) {
xe_gt_idle_disable_c6(pc_to_gt(pc));
- ret = 0;
- goto out;
+ return 0;
}
mutex_lock(&pc->freq_lock);
pc->freq_ready = false;
mutex_unlock(&pc->freq_lock);
- ret = pc_action_shutdown(pc);
- if (ret)
- goto out;
-
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) {
- drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
- ret = -EIO;
- }
-
-out:
- xe_device_mem_access_put(pc_to_xe(pc));
- return ret;
+ return 0;
}
/**
@@ -965,13 +898,11 @@ static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
struct xe_device *xe = pc_to_xe(pc);
if (xe->info.skip_guc_pc) {
- xe_device_mem_access_get(xe);
xe_gt_idle_disable_c6(pc_to_gt(pc));
- xe_device_mem_access_put(xe);
return;
}
- xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
+ XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
XE_WARN_ON(xe_guc_pc_stop(pc));
xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
@@ -998,16 +929,13 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
return err;
bo = xe_managed_bo_create_pin_map(xe, tile, size,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
pc->bo = bo;
- err = drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index ff77bc8da1b2..c7d38469fb46 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -27,6 +27,7 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_id_mgr.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
@@ -236,17 +237,9 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
struct xe_guc *guc = arg;
xa_destroy(&guc->submission_state.exec_queue_lookup);
- ida_destroy(&guc->submission_state.guc_ids);
- bitmap_free(guc->submission_state.guc_ids_bitmap);
free_submit_wq(guc);
- mutex_destroy(&guc->submission_state.lock);
}
-#define GUC_ID_MAX 65535
-#define GUC_ID_NUMBER_MLRC 4096
-#define GUC_ID_NUMBER_SLRC (GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
-#define GUC_ID_START_MLRC GUC_ID_NUMBER_SLRC
-
static const struct xe_exec_queue_ops guc_exec_queue_ops;
static void primelockdep(struct xe_guc *guc)
@@ -269,33 +262,28 @@ int xe_guc_submit_init(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
int err;
- guc->submission_state.guc_ids_bitmap =
- bitmap_zalloc(GUC_ID_NUMBER_MLRC, GFP_KERNEL);
- if (!guc->submission_state.guc_ids_bitmap)
- return -ENOMEM;
+ err = drmm_mutex_init(&xe->drm, &guc->submission_state.lock);
+ if (err)
+ return err;
+
+ err = xe_guc_id_mgr_init(&guc->submission_state.idm, ~0);
+ if (err)
+ return err;
err = alloc_submit_wq(guc);
- if (err) {
- bitmap_free(guc->submission_state.guc_ids_bitmap);
+ if (err)
return err;
- }
gt->exec_queue_ops = &guc_exec_queue_ops;
- mutex_init(&guc->submission_state.lock);
xa_init(&guc->submission_state.exec_queue_lookup);
- ida_init(&guc->submission_state.guc_ids);
spin_lock_init(&guc->submission_state.suspend.lock);
guc->submission_state.suspend.context = dma_fence_context_alloc(1);
primelockdep(guc);
- err = drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
@@ -307,12 +295,8 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
for (i = 0; i < xa_count; ++i)
xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
- if (xe_exec_queue_is_parallel(q))
- bitmap_release_region(guc->submission_state.guc_ids_bitmap,
- q->guc->id - GUC_ID_START_MLRC,
- order_base_2(q->width));
- else
- ida_free(&guc->submission_state.guc_ids, q->guc->id);
+ xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
+ q->guc->id, q->width);
}
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
@@ -330,21 +314,12 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
*/
lockdep_assert_held(&guc->submission_state.lock);
- if (xe_exec_queue_is_parallel(q)) {
- void *bitmap = guc->submission_state.guc_ids_bitmap;
-
- ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
- order_base_2(q->width));
- } else {
- ret = ida_alloc_max(&guc->submission_state.guc_ids,
- GUC_ID_NUMBER_SLRC - 1, GFP_NOWAIT);
- }
+ ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
+ q->width);
if (ret < 0)
return ret;
q->guc->id = ret;
- if (xe_exec_queue_is_parallel(q))
- q->guc->id += GUC_ID_START_MLRC;
for (i = 0; i < q->width; ++i) {
ptr = xa_store(&guc->submission_state.exec_queue_lookup,
@@ -533,7 +508,7 @@ static void register_engine(struct xe_exec_queue *q)
info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
if (xe_exec_queue_is_parallel(q)) {
- u32 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
+ u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
struct iosys_map map = xe_lrc_parallel_map(lrc);
info.wq_desc_lo = lower_32_bits(ggtt_addr +
@@ -833,7 +808,9 @@ static void simple_error_capture(struct xe_exec_queue *q)
}
}
- xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+ if (xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL))
+ xe_gt_info(guc_to_gt(guc),
+ "failed to get forcewake for error capture");
xe_guc_ct_print(&guc->ct, &p, true);
guc_exec_queue_print(q, &p);
for_each_hw_engine(hwe, guc_to_gt(guc), id) {
@@ -929,20 +906,26 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
int err = -ETIME;
int i = 0;
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
- drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
- xe_sched_job_seqno(job), q->guc->id, q->flags);
- xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
- "Kernel-submitted job timed out\n");
- xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
- "VM job timed out on non-killed execqueue\n");
+ /*
+ * The TDR has fired before the free job worker ran. Common if the
+ * exec queue was closed immediately after its last fence signaled.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
+ guc_exec_queue_free_job(drm_job);
- simple_error_capture(q);
- xe_devcoredump(job);
- } else {
- drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx",
- xe_sched_job_seqno(job), q->guc->id, q->flags);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
+
+ drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
+ xe_sched_job_seqno(job), q->guc->id, q->flags);
+ xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
+ "Kernel-submitted job timed out\n");
+ xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
+ "VM job timed out on non-killed execqueue\n");
+
+ simple_error_capture(q);
+ xe_devcoredump(job);
+
trace_xe_sched_job_timedout(job);
/* Kill the run_job entry point */
@@ -1220,7 +1203,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
init_waitqueue_head(&ge->suspend_wait);
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
- q->sched_props.job_timeout_ms;
+ msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
get_submit_wq(guc),
q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
@@ -1568,28 +1551,8 @@ static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
}
-int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q)
{
- struct xe_device *xe = guc_to_xe(guc);
- struct xe_exec_queue *q;
- u32 guc_id = msg[0];
-
- if (unlikely(len < 2)) {
- drm_err(&xe->drm, "Invalid length %u", len);
- return -EPROTO;
- }
-
- q = g2h_exec_queue_lookup(guc, guc_id);
- if (unlikely(!q))
- return -EPROTO;
-
- if (unlikely(!exec_queue_pending_enable(q) &&
- !exec_queue_pending_disable(q))) {
- drm_err(&xe->drm, "Unexpected engine state 0x%04x",
- atomic_read(&q->guc->state));
- return -EPROTO;
- }
-
trace_xe_exec_queue_scheduling_done(q);
if (exec_queue_pending_enable(q)) {
@@ -1609,17 +1572,15 @@ int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
deregister_exec_queue(guc, q);
}
}
-
- return 0;
}
-int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
u32 guc_id = msg[0];
- if (unlikely(len < 1)) {
+ if (unlikely(len < 2)) {
drm_err(&xe->drm, "Invalid length %u", len);
return -EPROTO;
}
@@ -1628,13 +1589,20 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
if (unlikely(!q))
return -EPROTO;
- if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
- exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
+ if (unlikely(!exec_queue_pending_enable(q) &&
+ !exec_queue_pending_disable(q))) {
drm_err(&xe->drm, "Unexpected engine state 0x%04x",
atomic_read(&q->guc->state));
return -EPROTO;
}
+ handle_sched_done(guc, q);
+
+ return 0;
+}
+
+static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
+{
trace_xe_exec_queue_deregister_done(q);
clear_exec_queue_registered(q);
@@ -1643,6 +1611,31 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
xe_exec_queue_put(q);
else
__guc_exec_queue_fini(guc, q);
+}
+
+int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q;
+ u32 guc_id = msg[0];
+
+ if (unlikely(len < 1)) {
+ drm_err(&xe->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ q = g2h_exec_queue_lookup(guc, guc_id);
+ if (unlikely(!q))
+ return -EPROTO;
+
+ if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
+ exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
+ drm_err(&xe->drm, "Unexpected engine state 0x%04x",
+ atomic_read(&q->guc->state));
+ return -EPROTO;
+ }
+
+ handle_deregister_done(guc, q);
return 0;
}
@@ -1782,7 +1775,7 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
/**
* xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
- * @job: faulty Xe scheduled job.
+ * @q: faulty exec queue
*
* This can be printed out in a later stage like during dev_coredump
* analysis.
@@ -1791,9 +1784,8 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
* caller, using `xe_guc_exec_queue_snapshot_free`.
*/
struct xe_guc_submit_exec_queue_snapshot *
-xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
+xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
{
- struct xe_exec_queue *q = job->q;
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc_submit_exec_queue_snapshot *snapshot;
int i;
@@ -1814,21 +1806,14 @@ xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
snapshot->sched_props.preempt_timeout_us =
q->sched_props.preempt_timeout_us;
- snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot),
+ snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *),
GFP_ATOMIC);
if (snapshot->lrc) {
for (i = 0; i < q->width; ++i) {
struct xe_lrc *lrc = q->lrc + i;
- snapshot->lrc[i].context_desc =
- lower_32_bits(xe_lrc_ggtt_addr(lrc));
- snapshot->lrc[i].head = xe_lrc_ring_head(lrc);
- snapshot->lrc[i].tail.internal = lrc->ring.tail;
- snapshot->lrc[i].tail.memory =
- xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL);
- snapshot->lrc[i].start_seqno = xe_lrc_start_seqno(lrc);
- snapshot->lrc[i].seqno = xe_lrc_seqno(lrc);
+ snapshot->lrc[i] = xe_lrc_snapshot_capture(lrc);
}
}
@@ -1867,6 +1852,24 @@ xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
}
/**
+ * xe_guc_exec_queue_snapshot_capture_delayed - Take the delayed part of a snapshot of the GuC Engine.
+ * @snapshot: Previously captured snapshot of the job.
+ *
+ * This captures some data that requires taking locks, so it cannot be done in the signaling path.
+ */
+void
+xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot)
+{
+ int i;
+
+ if (!snapshot || !snapshot->lrc)
+ return;
+
+ for (i = 0; i < snapshot->width; ++i)
+ xe_lrc_snapshot_capture_delayed(snapshot->lrc[i]);
+}
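
A sketch of the intended two-phase capture (hypothetical caller; in the
driver the split is between the coredump capture path and a deferred
worker):

	static void snapshot_sketch(struct xe_exec_queue *q, struct drm_printer *p)
	{
		struct xe_guc_submit_exec_queue_snapshot *snapshot;

		/* phase 1: atomic-safe capture, usable in the signaling path */
		snapshot = xe_guc_exec_queue_snapshot_capture(q);
		if (!snapshot)
			return;

		/* phase 2: from a worker, where taking locks is allowed */
		xe_guc_exec_queue_snapshot_capture_delayed(snapshot);

		xe_guc_exec_queue_snapshot_print(snapshot, p);
		xe_guc_exec_queue_snapshot_free(snapshot);
	}
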
+
+/**
* xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
* @snapshot: GuC Submit Engine snapshot object.
* @p: drm_printer where it will be printed out.
@@ -1894,18 +1897,9 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
drm_printf(p, "\tPreempt timeout: %u (us)\n",
snapshot->sched_props.preempt_timeout_us);
- for (i = 0; snapshot->lrc && i < snapshot->width; ++i) {
- drm_printf(p, "\tHW Context Desc: 0x%08x\n",
- snapshot->lrc[i].context_desc);
- drm_printf(p, "\tLRC Head: (memory) %u\n",
- snapshot->lrc[i].head);
- drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
- snapshot->lrc[i].tail.internal,
- snapshot->lrc[i].tail.memory);
- drm_printf(p, "\tStart seqno: (memory) %d\n",
- snapshot->lrc[i].start_seqno);
- drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->lrc[i].seqno);
- }
+ for (i = 0; snapshot->lrc && i < snapshot->width; ++i)
+ xe_lrc_snapshot_print(snapshot->lrc[i], p);
+
drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);
@@ -1930,10 +1924,16 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
*/
void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
+ int i;
+
if (!snapshot)
return;
- kfree(snapshot->lrc);
+ if (snapshot->lrc) {
+ for (i = 0; i < snapshot->width; i++)
+ xe_lrc_snapshot_free(snapshot->lrc[i]);
+ kfree(snapshot->lrc);
+ }
kfree(snapshot->pending_list);
kfree(snapshot);
}
@@ -1941,28 +1941,10 @@ void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *s
static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
{
struct xe_guc_submit_exec_queue_snapshot *snapshot;
- struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_sched_job *job;
- bool found = false;
-
- spin_lock(&sched->base.job_list_lock);
- list_for_each_entry(job, &sched->base.pending_list, drm.list) {
- if (job->q == q) {
- xe_sched_job_get(job);
- found = true;
- break;
- }
- }
- spin_unlock(&sched->base.job_list_lock);
- if (!found)
- return;
-
- snapshot = xe_guc_exec_queue_snapshot_capture(job);
+ snapshot = xe_guc_exec_queue_snapshot_capture(q);
xe_guc_exec_queue_snapshot_print(snapshot, p);
xe_guc_exec_queue_snapshot_free(snapshot);
-
- xe_sched_job_put(job);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 723dc2bd8df9..fad0421ead36 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -9,8 +9,8 @@
#include <linux/types.h>
struct drm_printer;
+struct xe_exec_queue;
struct xe_guc;
-struct xe_sched_job;
int xe_guc_submit_init(struct xe_guc *guc);
@@ -27,7 +27,9 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
struct xe_guc_submit_exec_queue_snapshot *
-xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job);
+xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
+void
+xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot);
void
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit_types.h b/drivers/gpu/drm/xe/xe_guc_submit_types.h
index 72fc0f42b0a5..dc7456c34583 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit_types.h
@@ -61,17 +61,6 @@ struct guc_submit_parallel_scratch {
u32 wq[WQ_SIZE / sizeof(u32)];
};
-struct lrc_snapshot {
- u32 context_desc;
- u32 head;
- struct {
- u32 internal;
- u32 memory;
- } tail;
- u32 start_seqno;
- u32 seqno;
-};
-
struct pending_list_snapshot {
u32 seqno;
bool fence;
@@ -109,7 +98,7 @@ struct xe_guc_submit_exec_queue_snapshot {
} sched_props;
/** @lrc: LRC Snapshot */
- struct lrc_snapshot *lrc;
+ struct xe_lrc_snapshot **lrc;
/** @schedule_state: Schedule State at the moment of Crash */
u32 schedule_state;
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index edcd1a950bd3..82bd93f7867d 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -32,6 +32,21 @@ struct xe_guc_db_mgr {
};
/**
+ * struct xe_guc_id_mgr - GuC context ID Manager.
+ *
+ * Note: the GuC context ID Manager relies on &xe_guc::submission_state.lock
+ * to protect its members.
+ */
+struct xe_guc_id_mgr {
+ /** @bitmap: bitmap to track allocated IDs */
+ unsigned long *bitmap;
+ /** @total: total number of IDs being managed */
+ unsigned int total;
+ /** @used: number of IDs currently in use */
+ unsigned int used;
+};
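
A minimal sketch of that locking contract (hypothetical snippet; the real
caller is alloc_guc_id() in xe_guc_submit.c, which runs with the lock
already held):

	mutex_lock(&guc->submission_state.lock);
	/* reserve q->width contiguous IDs; returns the base ID */
	ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
					   q->width);
	mutex_unlock(&guc->submission_state.lock);
	if (ret < 0)
		return ret;
	q->guc->id = ret;
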
+
+/**
* struct xe_guc - Graphic micro controller
*/
struct xe_guc {
@@ -49,12 +64,10 @@ struct xe_guc {
struct xe_guc_db_mgr dbm;
/** @submission_state: GuC submission state */
struct {
+ /** @submission_state.idm: GuC context ID Manager */
+ struct xe_guc_id_mgr idm;
/** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */
struct xarray exec_queue_lookup;
- /** @submission_state.guc_ids: used to allocate new guc_ids, single-lrc */
- struct ida guc_ids;
- /** @submission_state.guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
- unsigned long *guc_ids_bitmap;
/** @submission_state.stopped: submissions are stopped */
atomic_t stopped;
/** @submission_state.lock: protects submission state */
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
new file mode 100644
index 000000000000..2c32dc46f7d4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/mmu_notifier.h>
+#include <linux/dma-mapping.h>
+#include <linux/memremap.h>
+#include <linux/swap.h>
+#include <linux/hmm.h>
+#include <linux/mm.h>
+#include "xe_hmm.h"
+#include "xe_vm.h"
+#include "xe_bo.h"
+
+static u64 xe_npages_in_range(unsigned long start, unsigned long end)
+{
+ return (end - start) >> PAGE_SHIFT;
+}
+
+/*
+ * xe_mark_range_accessed() - mark a range as accessed, so the core mm
+ * has this information for memory eviction or writeback to
+ * disk
+ *
+ * @range: the range to mark
+ * @write: if the range was written to, mark the pages in this range
+ * as dirty as well
+ */
+static void xe_mark_range_accessed(struct hmm_range *range, bool write)
+{
+ struct page *page;
+ u64 i, npages;
+
+ npages = xe_npages_in_range(range->start, range->end);
+ for (i = 0; i < npages; i++) {
+ page = hmm_pfn_to_page(range->hmm_pfns[i]);
+ if (write)
+ set_page_dirty_lock(page);
+
+ mark_page_accessed(page);
+ }
+}
+
+/*
+ * xe_build_sg() - build a scatter gather table for all the physical pages/pfns
+ * in a hmm_range. dma-map the pages if necessary. The dma-addresses are saved
+ * in the sg table and will later be used to program the GPU page table.
+ *
+ * @xe: the xe device that will access the dma-addresses in the sg table
+ * @range: the hmm range that the sg table is built from. range->hmm_pfns[]
+ * holds the pfns of the pages that back this hmm address range.
+ * @st: pointer to the sg table.
+ * @write: whether we write to this range. This decides the dma map direction
+ * for system pages: if we write, the mapping is bidirectional; otherwise
+ * it is DMA_TO_DEVICE.
+ *
+ * All contiguous pfns are collapsed into one entry in
+ * the scatter gather table, so that the GPU page table can be
+ * programmed efficiently.
+ *
+ * The dma_address in the sg table will later be used by the GPU to
+ * access memory. So if the memory is system memory, we need to
+ * dma-map it so it can be accessed by the GPU/DMA.
+ *
+ * FIXME: This function currently only supports pages in system
+ * memory. If the memory is GPU local memory (of the GPU that
+ * is going to access it), we need the GPU dpa (device physical
+ * address) instead, and no dma-mapping is needed. This is TBD.
+ *
+ * FIXME: dma-mapping for a peer gpu device to access a remote gpu's
+ * memory. Add this when p2p is supported.
+ *
+ * This function allocates the storage of the sg table. It is the
+ * caller's responsibility to free it by calling sg_free_table().
+ *
+ * Returns 0 if successful; -ENOMEM if it fails to allocate memory
+ */
+static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
+ struct sg_table *st, bool write)
+{
+ struct device *dev = xe->drm.dev;
+ struct page **pages;
+ u64 i, npages;
+ int ret;
+
+ npages = xe_npages_in_range(range->start, range->end);
+ pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
+ xe_assert(xe, !is_device_private_page(pages[i]));
+ }
+
+ ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
+ xe_sg_segment_size(dev), GFP_KERNEL);
+ if (ret)
+ goto free_pages;
+
+ ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+ if (ret) {
+ sg_free_table(st);
+ st = NULL;
+ }
+
+free_pages:
+ kvfree(pages);
+ return ret;
+}
+
+/*
+ * xe_hmm_userptr_free_sg() - Free the scatter gather table of a userptr
+ *
+ * @uvma: the userptr vma which holds the scatter gather table
+ *
+ * xe_hmm_userptr_populate_range() allocates the storage of
+ * the userptr sg table. This is a helper function to free that
+ * sg table and dma-unmap the addresses in it.
+ */
+void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ bool write = !xe_vma_read_only(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+ struct device *dev = xe->drm.dev;
+
+ xe_assert(xe, userptr->sg);
+ dma_unmap_sgtable(dev, userptr->sg,
+ write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
+}
+
+/**
+ * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual
+ * address range
+ *
+ * @uvma: userptr vma which has information of the range to populate.
+ * @is_mm_mmap_locked: True if mmap_read_lock is already acquired by caller.
+ *
+ * This function populates the physical pages of a virtual
+ * address range. The populated physical pages are saved in the
+ * userptr's sg table. It is similar to get_user_pages() but calls
+ * hmm_range_fault().
+ *
+ * This function also reads the mmu notifier sequence number
+ * (mmu_interval_read_begin()) for the purpose of later
+ * comparison (through mmu_interval_read_retry()).
+ *
+ * This must be called with mmap read or write lock held.
+ *
+ * This function allocates the storage of the userptr sg table.
+ * It is the caller's responsibility to free it by calling sg_free_table().
+ *
+ * Returns: 0 for success; negative error code on failure
+ */
+int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
+ bool is_mm_mmap_locked)
+{
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
+ struct xe_userptr *userptr;
+ struct xe_vma *vma = &uvma->vma;
+ u64 userptr_start = xe_vma_userptr(vma);
+ u64 userptr_end = userptr_start + xe_vma_size(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct hmm_range hmm_range;
+ bool write = !xe_vma_read_only(vma);
+ unsigned long notifier_seq;
+ u64 npages;
+ int ret;
+
+ userptr = &uvma->userptr;
+
+ if (is_mm_mmap_locked)
+ mmap_assert_locked(userptr->notifier.mm);
+
+ if (vma->gpuva.flags & XE_VMA_DESTROYED)
+ return 0;
+
+ notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+ if (notifier_seq == userptr->notifier_seq)
+ return 0;
+
+ if (userptr->sg)
+ xe_hmm_userptr_free_sg(uvma);
+
+ npages = xe_npages_in_range(userptr_start, userptr_end);
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (unlikely(!pfns))
+ return -ENOMEM;
+
+ if (write)
+ flags |= HMM_PFN_REQ_WRITE;
+
+ if (!mmget_not_zero(userptr->notifier.mm)) {
+ ret = -EFAULT;
+ goto free_pfns;
+ }
+
+ hmm_range.default_flags = flags;
+ hmm_range.hmm_pfns = pfns;
+ hmm_range.notifier = &userptr->notifier;
+ hmm_range.start = userptr_start;
+ hmm_range.end = userptr_end;
+ hmm_range.dev_private_owner = vm->xe;
+
+ while (true) {
+ hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+
+ if (!is_mm_mmap_locked)
+ mmap_read_lock(userptr->notifier.mm);
+
+ ret = hmm_range_fault(&hmm_range);
+
+ if (!is_mm_mmap_locked)
+ mmap_read_unlock(userptr->notifier.mm);
+
+ if (ret == -EBUSY) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ continue;
+ }
+ break;
+ }
+
+ mmput(userptr->notifier.mm);
+
+ if (ret)
+ goto free_pfns;
+
+ ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
+ if (ret)
+ goto free_pfns;
+
+ xe_mark_range_accessed(&hmm_range, write);
+ userptr->sg = &userptr->sgt;
+ userptr->notifier_seq = hmm_range.notifier_seq;
+
+free_pfns:
+ kvfree(pfns);
+ return ret;
+}
+
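A usage sketch (hypothetical helper; the real callers sit in the userptr
pinning paths) of the populate-then-validate cycle this function is built
for:

	static int repin_userptr_sketch(struct xe_userptr_vma *uvma)
	{
		int err;

		/* fault in the current backing pages, (re)build the sg table */
		err = xe_hmm_userptr_populate_range(uvma, false);
		if (err)
			return err;

		/*
		 * Later, under the notifier lock, check that the pages did
		 * not move in the meantime, and repeat the populate if so:
		 *
		 *	if (mmu_interval_read_retry(&uvma->userptr.notifier,
		 *				    uvma->userptr.notifier_seq))
		 *		goto again;
		 */
		return 0;
	}
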
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
new file mode 100644
index 000000000000..909dc2bdcd97
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hmm.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+struct xe_userptr_vma;
+
+int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
+void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index b545f850087c..39a484a57585 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -53,26 +53,19 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
struct xe_gt *gt = huc_to_gt(huc);
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
- int err;
/* we use a single object for both input and output */
bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
PXP43_HUC_AUTH_INOUT_SIZE * 2,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
huc->gsc_pkt = bo;
- err = drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
- if (err) {
- free_gsc_pkt(&xe->drm, huc);
- return err;
- }
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
}
int xe_huc_init(struct xe_huc *huc)
diff --git a/drivers/gpu/drm/xe/xe_huc_debugfs.c b/drivers/gpu/drm/xe/xe_huc_debugfs.c
index 18585a7eeb9d..3a888a40188b 100644
--- a/drivers/gpu/drm/xe/xe_huc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_huc_debugfs.c
@@ -12,6 +12,7 @@
#include "xe_gt.h"
#include "xe_huc.h"
#include "xe_macros.h"
+#include "xe_pm.h"
static struct xe_gt *
huc_to_gt(struct xe_huc *huc)
@@ -36,9 +37,9 @@ static int huc_info(struct seq_file *m, void *data)
struct xe_device *xe = huc_to_xe(huc);
struct drm_printer p = drm_seq_file_printer(m);
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
xe_huc_print_info(huc, &p);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index b5e83ea172f3..455f375c1cbd 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -14,8 +14,10 @@
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
+#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
+#include "xe_gt_printk.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
@@ -463,6 +465,32 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
+
+ /*
+ * The GSC engine can accept submissions while the GSC shim is
+ * being reset, during which time the submission is stalled. In
+ * the worst case, the shim reset can take up to the maximum GSC
+ * command execution time (250ms), so the request start can be
+ * delayed by that much; the request itself can take that long
+ * without being preemptible, which means worst case it can
+ * theoretically take up to 500ms for a preemption to go through
+ * on the GSC engine. Adding to that an extra 100ms as a safety
+ * margin, we get a minimum recommended timeout of 600ms.
+ * The preempt_timeout value can't be tuned for OTHER_CLASS
+ * because the class is reserved for kernel usage, so we just
+ * need to make sure that the starting value is above that
+ * threshold; since our default value (640ms) is greater than
+ * 600ms, the only way we can go below is via a kconfig setting.
+ * If that happens, log it in dmesg and update the value.
+ */
+ if (hwe->class == XE_ENGINE_CLASS_OTHER) {
+ const u32 min_preempt_timeout = 600 * 1000;
+ if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
+ hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
+ xe_gt_notice(gt, "Increasing preempt_timeout for GSC to 600ms\n");
+ }
+ }
+
/* Record default props */
hwe->eclass->defaults = hwe->eclass->sched_props;
}
@@ -490,8 +518,9 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_reg_sr_apply_whitelist(hwe);
hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(hwe->hwsp)) {
err = PTR_ERR(hwe->hwsp);
goto err_name;
@@ -509,18 +538,19 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
}
}
- if (xe_device_uc_enabled(xe))
+ if (xe_device_uc_enabled(xe)) {
+ /* GSCCS has a special interrupt for reset */
+ if (hwe->class == XE_ENGINE_CLASS_OTHER)
+ hwe->irq_handler = xe_gsc_hwe_irq_handler;
+
xe_hw_engine_enable_ring(hwe);
+ }
/* We reserve the highest BCS instance for USM */
if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
gt->usm.reserved_bcs_instance = hwe->instance;
- err = drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
- if (err)
- return err;
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
err_kernel_lrc:
xe_lrc_finish(&hwe->kernel_lrc);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index 2345fb42fa39..844ec68cbbb8 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -7,8 +7,10 @@
#include <linux/kobject.h>
#include <linux/sysfs.h>
+#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
+#include "xe_pm.h"
#define MAX_ENGINE_CLASS_NAME_LEN 16
static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
@@ -70,7 +72,7 @@ static ssize_t job_timeout_max_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_max);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.job_timeout_max);
}
static const struct kobj_attribute job_timeout_max_attr =
@@ -106,7 +108,7 @@ static ssize_t job_timeout_min_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_min);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.job_timeout_min);
}
static const struct kobj_attribute job_timeout_min_attr =
@@ -139,7 +141,7 @@ static ssize_t job_timeout_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_ms);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.job_timeout_ms);
}
static const struct kobj_attribute job_timeout_attr =
@@ -150,7 +152,7 @@ static ssize_t job_timeout_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.job_timeout_ms);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.job_timeout_ms);
}
static const struct kobj_attribute job_timeout_def =
@@ -161,7 +163,7 @@ static ssize_t job_timeout_min_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.job_timeout_min);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.job_timeout_min);
}
static const struct kobj_attribute job_timeout_min_def =
@@ -172,7 +174,7 @@ static ssize_t job_timeout_max_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.job_timeout_max);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.job_timeout_max);
}
static const struct kobj_attribute job_timeout_max_def =
@@ -231,7 +233,7 @@ static ssize_t timeslice_duration_max_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.timeslice_max);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.timeslice_max);
}
static const struct kobj_attribute timeslice_duration_max_attr =
@@ -269,7 +271,7 @@ static ssize_t timeslice_duration_min_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.timeslice_min);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.timeslice_min);
}
static const struct kobj_attribute timeslice_duration_min_attr =
@@ -281,7 +283,7 @@ static ssize_t timeslice_duration_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.timeslice_us);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.timeslice_us);
}
static const struct kobj_attribute timeslice_duration_attr =
@@ -293,7 +295,7 @@ static ssize_t timeslice_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.timeslice_us);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.timeslice_us);
}
static const struct kobj_attribute timeslice_duration_def =
@@ -304,7 +306,7 @@ static ssize_t timeslice_min_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.timeslice_min);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.timeslice_min);
}
static const struct kobj_attribute timeslice_duration_min_def =
@@ -315,7 +317,7 @@ static ssize_t timeslice_max_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.timeslice_max);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.timeslice_max);
}
static const struct kobj_attribute timeslice_duration_max_def =
@@ -348,7 +350,7 @@ static ssize_t preempt_timeout_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_us);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.preempt_timeout_us);
}
static const struct kobj_attribute preempt_timeout_attr =
@@ -360,7 +362,7 @@ static ssize_t preempt_timeout_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_us);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.preempt_timeout_us);
}
static const struct kobj_attribute preempt_timeout_def =
@@ -372,7 +374,7 @@ static ssize_t preempt_timeout_min_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_min);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.preempt_timeout_min);
}
static const struct kobj_attribute preempt_timeout_min_def =
@@ -384,7 +386,7 @@ static ssize_t preempt_timeout_max_default(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
- return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_max);
+ return sysfs_emit(buf, "%u\n", eclass->defaults.preempt_timeout_max);
}
static const struct kobj_attribute preempt_timeout_max_def =
@@ -420,7 +422,7 @@ static ssize_t preempt_timeout_max_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_max);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.preempt_timeout_max);
}
static const struct kobj_attribute preempt_timeout_max_attr =
@@ -457,7 +459,7 @@ static ssize_t preempt_timeout_min_show(struct kobject *kobj,
{
struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
- return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_min);
+ return sysfs_emit(buf, "%u\n", eclass->sched_props.preempt_timeout_min);
}
static const struct kobj_attribute preempt_timeout_min_attr =
@@ -498,8 +500,8 @@ static void kobj_xe_hw_engine_class_fini(struct drm_device *drm, void *arg)
kobject_put(kobj);
}
- static struct kobj_eclass *
-kobj_xe_hw_engine_class(struct xe_device *xe, struct kobject *parent, char *name)
+static struct kobj_eclass *
+kobj_xe_hw_engine_class(struct xe_device *xe, struct kobject *parent, const char *name)
{
struct kobj_eclass *keclass;
int err = 0;
@@ -513,13 +515,13 @@ kobj_xe_hw_engine_class(struct xe_device *xe, struct kobject *parent, char *name
kobject_put(&keclass->base);
return NULL;
}
+ keclass->xe = xe;
err = drmm_add_action_or_reset(&xe->drm, kobj_xe_hw_engine_class_fini,
&keclass->base);
if (err)
- drm_warn(&xe->drm,
- "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return NULL;
+
return keclass;
}
@@ -550,13 +552,8 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
if (err)
goto err_object;
- err = drmm_add_action_or_reset(&xe->drm, hw_engine_class_defaults_fini,
- kobj);
- if (err)
- drm_warn(&xe->drm,
- "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- return err;
+ return drmm_add_action_or_reset(&xe->drm, hw_engine_class_defaults_fini, kobj);
+
err_object:
kobject_put(kobj);
return err;
@@ -567,9 +564,51 @@ static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
kfree(kobj);
}
+static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->show(kobj, kattr, buf);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->store(kobj, kattr, buf, count);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
+ .show = xe_hw_engine_class_sysfs_attr_show,
+ .store = xe_hw_engine_class_sysfs_attr_store,
+};
+
static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
.release = xe_hw_engine_sysfs_kobj_release,
- .sysfs_ops = &kobj_sysfs_ops,
+ .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
};
static void hw_engine_class_sysfs_fini(struct drm_device *drm, void *arg)
@@ -579,6 +618,24 @@ static void hw_engine_class_sysfs_fini(struct drm_device *drm, void *arg)
kobject_put(kobj);
}
+static const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
+{
+ switch (class) {
+ case XE_ENGINE_CLASS_RENDER:
+ return "rcs";
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ return "vcs";
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ return "vecs";
+ case XE_ENGINE_CLASS_COPY:
+ return "bcs";
+ case XE_ENGINE_CLASS_COMPUTE:
+ return "ccs";
+ default:
+ return NULL;
+ }
+}
+
/**
* xe_hw_engine_class_sysfs_init - Init HW engine classes on GT.
* @gt: Xe GT.
@@ -608,7 +665,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
goto err_object;
for_each_hw_engine(hwe, gt, id) {
- char name[MAX_ENGINE_CLASS_NAME_LEN];
+ const char *name;
struct kobj_eclass *keclass;
if (hwe->class == XE_ENGINE_CLASS_OTHER ||
@@ -619,24 +676,8 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
continue;
class_mask |= 1 << hwe->class;
-
- switch (hwe->class) {
- case XE_ENGINE_CLASS_RENDER:
- strcpy(name, "rcs");
- break;
- case XE_ENGINE_CLASS_VIDEO_DECODE:
- strcpy(name, "vcs");
- break;
- case XE_ENGINE_CLASS_VIDEO_ENHANCE:
- strcpy(name, "vecs");
- break;
- case XE_ENGINE_CLASS_COPY:
- strcpy(name, "bcs");
- break;
- case XE_ENGINE_CLASS_COMPUTE:
- strcpy(name, "ccs");
- break;
- default:
+ name = xe_hw_engine_class_to_str(hwe->class);
+ if (!name) {
err = -EINVAL;
goto err_object;
}
@@ -649,26 +690,16 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
keclass->eclass = hwe->eclass;
err = xe_add_hw_engine_class_defaults(xe, &keclass->base);
- if (err) {
- drm_warn(&xe->drm,
- "Add .defaults to engines failed!, err: %d\n",
- err);
+ if (err)
goto err_object;
- }
err = sysfs_create_files(&keclass->base, files);
if (err)
goto err_object;
}
- err = drmm_add_action_or_reset(&xe->drm, hw_engine_class_sysfs_fini,
- kobj);
- if (err)
- drm_warn(&xe->drm,
- "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return drmm_add_action_or_reset(&xe->drm, hw_engine_class_sysfs_fini, kobj);
- return err;
err_object:
kobject_put(kobj);
return err;
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
index ec5ba673b314..28a0d7c909c0 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
@@ -26,6 +26,8 @@ struct kobj_eclass {
struct kobject base;
/** @eclass: A pointer to the hw engine class interface */
struct xe_hw_engine_class_intf *eclass;
+ /** @xe: A pointer to the xe device */
+ struct xe_device *xe;
};
static inline struct xe_hw_engine_class_intf *kobj_to_eclass(struct kobject *kobj)
@@ -33,4 +35,9 @@ static inline struct xe_hw_engine_class_intf *kobj_to_eclass(struct kobject *kob
return container_of(kobj, struct kobj_eclass, base)->eclass;
}
+static inline struct xe_device *kobj_to_xe(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_eclass, base)->xe;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index a5de3e7b0bd6..f872ef103127 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -130,7 +130,7 @@ void xe_hw_fence_ctx_init(struct xe_hw_fence_ctx *ctx, struct xe_gt *gt,
ctx->irq = irq;
ctx->dma_fence_ctx = dma_fence_context_alloc(1);
ctx->next_seqno = XE_FENCE_INITIAL_SEQNO;
- sprintf(ctx->name, "%s", name);
+ snprintf(ctx->name, sizeof(ctx->name), "%s", name);
}
void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index b82233a41606..453e601ddd5e 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -18,6 +18,7 @@
#include "xe_pcode.h"
#include "xe_pcode_api.h"
#include "xe_sriov.h"
+#include "xe_pm.h"
enum xe_hwmon_reg {
REG_PKG_RAPL_LIMIT,
@@ -33,6 +34,12 @@ enum xe_hwmon_reg_operation {
REG_READ64,
};
+enum xe_hwmon_channel {
+ CHANNEL_CARD,
+ CHANNEL_PKG,
+ CHANNEL_MAX,
+};
+
/*
* SF_* - scale factors for particular quantities according to hwmon spec.
*/
@@ -68,61 +75,61 @@ struct xe_hwmon {
int scl_shift_energy;
/** @scl_shift_time: pkg time unit */
int scl_shift_time;
- /** @ei: Energy info for energy1_input */
- struct xe_hwmon_energy_info ei;
+ /** @ei: Energy info for energyN_input */
+ struct xe_hwmon_energy_info ei[CHANNEL_MAX];
};
-static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
+static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
+ int channel)
{
struct xe_device *xe = gt_to_xe(hwmon->gt);
- struct xe_reg reg = XE_REG(0);
switch (hwmon_reg) {
case REG_PKG_RAPL_LIMIT:
- if (xe->info.platform == XE_PVC)
- reg = PVC_GT0_PACKAGE_RAPL_LIMIT;
- else if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_RAPL_LIMIT;
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
+ return PVC_GT0_PACKAGE_RAPL_LIMIT;
+ else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
+ return PCU_CR_PACKAGE_RAPL_LIMIT;
break;
case REG_PKG_POWER_SKU:
- if (xe->info.platform == XE_PVC)
- reg = PVC_GT0_PACKAGE_POWER_SKU;
- else if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_POWER_SKU;
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
+ return PVC_GT0_PACKAGE_POWER_SKU;
+ else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
+ return PCU_CR_PACKAGE_POWER_SKU;
break;
case REG_PKG_POWER_SKU_UNIT:
if (xe->info.platform == XE_PVC)
- reg = PVC_GT0_PACKAGE_POWER_SKU_UNIT;
+ return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
else if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_POWER_SKU_UNIT;
+ return PCU_CR_PACKAGE_POWER_SKU_UNIT;
break;
case REG_GT_PERF_STATUS:
- if (xe->info.platform == XE_DG2)
- reg = GT_PERF_STATUS;
+ if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG)
+ return GT_PERF_STATUS;
break;
case REG_PKG_ENERGY_STATUS:
- if (xe->info.platform == XE_PVC)
- reg = PVC_GT0_PLATFORM_ENERGY_STATUS;
- else if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_ENERGY_STATUS;
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
+ return PVC_GT0_PLATFORM_ENERGY_STATUS;
+ else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
+ return PCU_CR_PACKAGE_ENERGY_STATUS;
break;
default:
drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
break;
}
- return reg.raw;
+ return XE_REG(0);
}
static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
enum xe_hwmon_reg_operation operation, u64 *value,
- u32 clr, u32 set)
+ u32 clr, u32 set, int channel)
{
struct xe_reg reg;
- reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
+ reg = xe_hwmon_get_reg(hwmon, hwmon_reg, channel);
- if (!reg.raw)
+ if (!xe_reg_is_valid(reg))
return;
switch (operation) {
@@ -150,13 +157,13 @@ static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon
* same pattern for sysfs, allow arbitrary PL1 limits to be set but display
* clamped values when read.
*/
-static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val, min, max;
mutex_lock(&hwmon->hwmon_lock);
- xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0);
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0, channel);
/* Check if PL1 limit is disabled */
if (!(reg_val & PKG_PWR_LIM_1_EN)) {
*value = PL1_DISABLE;
@@ -166,7 +173,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
- xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0);
+ xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0, channel);
min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
@@ -178,7 +185,7 @@ unlock:
mutex_unlock(&hwmon->hwmon_lock);
}
-static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
+static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
int ret = 0;
u64 reg_val;
@@ -188,9 +195,9 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
if (value == PL1_DISABLE) {
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
- PKG_PWR_LIM_1_EN, 0);
+ PKG_PWR_LIM_1_EN, 0, channel);
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val,
- PKG_PWR_LIM_1_EN, 0);
+ PKG_PWR_LIM_1_EN, 0, channel);
if (reg_val & PKG_PWR_LIM_1_EN) {
ret = -EOPNOTSUPP;
@@ -203,17 +210,17 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
- PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
+ PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val, channel);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
return ret;
}
-static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val;
- xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ32, &reg_val, 0, 0);
+ xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ32, &reg_val, 0, 0, channel);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}
@@ -236,16 +243,16 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
* the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
* a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
* hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
- * energy1_input overflows. This at 1000 W is an overflow duration of 278 years.
+ * energyN_input overflows. This at 1000 W is an overflow duration of 278 years.
*/
static void
-xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
+xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
- struct xe_hwmon_energy_info *ei = &hwmon->ei;
+ struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
u64 reg_val;
xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ32,
- &reg_val, 0, 0);
+ &reg_val, 0, 0, channel);
if (reg_val >= ei->reg_val_prev)
ei->accum_energy += reg_val - ei->reg_val_prev;
@@ -259,23 +266,24 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
}
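The overflow comment above compresses several steps; spelling them out (assuming scl_shift_energy = 14, i.e. one register tick = 2^-14 J): 63 usable bits in a positive long, minus ~20 bits consumed by the 1e6 microjoule scale factor, plus 14 bits regained from the shift leaves 2^57 ticks, and 2^57 ticks * 2^-14 J/tick = 2^43 J, about 8.8e12 J, which at a constant 1000 W lasts roughly 8.8e9 s, i.e. ~278 years. A throwaway userspace check of the same arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* Values taken from the comment above, not from hardware. */
            const unsigned int shift = 14;          /* scl_shift_energy */
            const double joules = (double)(1ULL << (57 - shift)); /* 2^43 J */
            const double seconds = joules / 1000.0; /* at a constant 1000 W */

            printf("overflow after %.3g J, %.1f years\n",
                   joules, seconds / (365.25 * 24 * 3600));
            return 0;
    }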
static ssize_t
-xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
+ int sensor_index = to_sensor_dev_attr(attr)->index;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
mutex_lock(&hwmon->hwmon_lock);
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
- REG_READ32, &r, 0, 0);
+ REG_READ32, &r, 0, 0, sensor_index);
mutex_unlock(&hwmon->hwmon_lock);
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
@@ -290,7 +298,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
* As y can be < 2, we compute tau4 = (4 | x) << y
* and then add 2 when doing the final right shift to account for units
*/
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@@ -299,14 +307,15 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
}
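The (u64) cast added to tau4 fixes a potential 32-bit overflow: ((1 << x_w) | x) is an int, and shifting it by y can need more than 31 bits before the value is ever widened. A standalone sketch of the 1.x * 2^y window decoding with the widening done first (the helper name is made up; field widths as in the code above):

    #include <stdint.h>

    /*
     * Decode a power-limit time window stored as a 2-bit mantissa
     * fraction x and exponent y, i.e. window = (1.x) * 2^y. The result
     * is 4x that value ("tau4"); callers shift the extra x_w bits back
     * out. Widening to 64 bits *before* the shift is the point.
     */
    static uint64_t decode_tau4(uint32_t x, uint32_t y)
    {
            const unsigned int x_w = 2;     /* mantissa width in bits */

            return (uint64_t)((1u << x_w) | x) << y;
    }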
static ssize_t
-xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
int ret;
+ int sensor_index = to_sensor_dev_attr(attr)->index;
ret = kstrtoul(buf, 0, &val);
if (ret)
@@ -325,12 +334,12 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
/*
* val must be < max in hwmon interface units. The steps below are
- * explained in xe_hwmon_power1_max_interval_show()
+ * explained in xe_hwmon_power_max_interval_show()
*/
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)
@@ -354,26 +363,31 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
mutex_lock(&hwmon->hwmon_lock);
xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, (u64 *)&r,
- PKG_PWR_LIM_1_TIME, rxy);
+ PKG_PWR_LIM_1_TIME, rxy, sensor_index);
mutex_unlock(&hwmon->hwmon_lock);
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return count;
}
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
- xe_hwmon_power1_max_interval_show,
- xe_hwmon_power1_max_interval_store, 0);
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, CHANNEL_CARD);
+
+static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, CHANNEL_PKG);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_max_interval.dev_attr.attr,
+ &sensor_dev_attr_power2_max_interval.dev_attr.attr,
NULL
};
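SENSOR_DEVICE_ATTR()'s last argument is stashed in the attribute and recovered in the shared callbacks via to_sensor_dev_attr(attr)->index, which is how one show/store pair now serves both power1_max_interval and power2_max_interval. A minimal sketch of the same trick (the demo attribute names are hypothetical):

    #include <linux/hwmon-sysfs.h>
    #include <linux/sysfs.h>

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            /* The channel baked into the attribute comes back here. */
            int channel = to_sensor_dev_attr(attr)->index;

            return sysfs_emit(buf, "channel %d\n", channel);
    }

    /* Two sysfs files, one handler, distinguished purely by ->index. */
    static SENSOR_DEVICE_ATTR_RO(demo1_value, demo, 0);
    static SENSOR_DEVICE_ATTR_RO(demo2_value, demo, 1);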
@@ -384,12 +398,11 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
- if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
- ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? attr->mode : 0;
+ ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
@@ -405,10 +418,11 @@ static const struct attribute_group *hwmon_groups[] = {
};
static const struct hwmon_channel_info * const hwmon_info[] = {
- HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
- HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
- HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
- HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
+ HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
+ HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
NULL
};
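HWMON_CHANNEL_INFO() takes one flag mask per channel, so supplying a second mask is what grows each sensor type from one channel to two; the masks map one-to-one onto the power1_*/power2_* (and friends) sysfs files. A reduced sketch of a two-channel power declaration using the standard <linux/hwmon.h> macro:

    #include <linux/hwmon.h>

    /*
     * Channel 0 ("card") exposes max/rated_max/label; channel 1
     * ("pkg") additionally exposes crit.
     */
    static const struct hwmon_channel_info * const demo_info[] = {
            HWMON_CHANNEL_INFO(power,
                               HWMON_P_MAX | HWMON_P_RATED_MAX |
                               HWMON_P_LABEL,
                               HWMON_P_MAX | HWMON_P_RATED_MAX |
                               HWMON_P_CRIT | HWMON_P_LABEL),
            NULL
    };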
@@ -431,7 +445,8 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
uval);
}
-static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
+static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
+ long *value, u32 scale_factor)
{
int ret;
u32 uval;
@@ -449,7 +464,8 @@ unlock:
return ret;
}
-static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
+static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
+ long value, u32 scale_factor)
{
int ret;
u32 uval;
@@ -463,117 +479,131 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u3
return ret;
}
-static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
+static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val;
xe_hwmon_process_reg(hwmon, REG_GT_PERF_STATUS,
- REG_READ32, &reg_val, 0, 0);
+ REG_READ32, &reg_val, 0, 0, channel);
/* HW register value in units of 2.5 millivolt */
*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}
static umode_t
-xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
+xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
u32 uval;
switch (attr) {
case hwmon_power_max:
- return xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? 0664 : 0;
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ channel)) ? 0664 : 0;
case hwmon_power_rated_max:
- return xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU) ? 0444 : 0;
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
+ channel)) ? 0444 : 0;
case hwmon_power_crit:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
- !(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ if (channel == CHANNEL_PKG)
+ return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ !(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ break;
+ case hwmon_power_label:
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
+ channel)) ? 0444 : 0;
default:
return 0;
}
+ return 0;
}
static int
-xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
+xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_power_max:
- xe_hwmon_power_max_read(hwmon, val);
+ xe_hwmon_power_max_read(hwmon, channel, val);
return 0;
case hwmon_power_rated_max:
- xe_hwmon_power_rated_max_read(hwmon, val);
+ xe_hwmon_power_rated_max_read(hwmon, channel, val);
return 0;
case hwmon_power_crit:
- return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
+ return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
default:
return -EOPNOTSUPP;
}
}
static int
-xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
+xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
case hwmon_power_max:
- return xe_hwmon_power_max_write(hwmon, val);
+ return xe_hwmon_power_max_write(hwmon, channel, val);
case hwmon_power_crit:
- return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
+ return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
default:
return -EOPNOTSUPP;
}
}
static umode_t
-xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
+xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
u32 uval;
switch (attr) {
case hwmon_curr_crit:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
- (uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ case hwmon_curr_label:
+ if (channel == CHANNEL_PKG)
+ return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ (uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ break;
default:
return 0;
}
+ return 0;
}
static int
-xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
+xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_curr_crit:
- return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
+ return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_CURR);
default:
return -EOPNOTSUPP;
}
}
static int
-xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
+xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
case hwmon_curr_crit:
- return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
+ return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_CURR);
default:
return -EOPNOTSUPP;
}
}
static umode_t
-xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr)
+xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
switch (attr) {
case hwmon_in_input:
- return xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS) ? 0444 : 0;
+ case hwmon_in_label:
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS,
+ channel)) ? 0444 : 0;
default:
return 0;
}
}
static int
-xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
+xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_in_input:
- xe_hwmon_get_voltage(hwmon, val);
+ xe_hwmon_get_voltage(hwmon, channel, val);
return 0;
default:
return -EOPNOTSUPP;
@@ -581,22 +611,24 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
}
static umode_t
-xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr)
+xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
switch (attr) {
case hwmon_energy_input:
- return xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS) ? 0444 : 0;
+ case hwmon_energy_label:
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel)) ? 0444 : 0;
default:
return 0;
}
}
static int
-xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, long *val)
+xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_energy_input:
- xe_hwmon_energy_get(hwmon, val);
+ xe_hwmon_energy_get(hwmon, channel, val);
return 0;
default:
return -EOPNOTSUPP;
@@ -610,27 +642,27 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
switch (type) {
case hwmon_power:
ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
break;
case hwmon_curr:
- ret = xe_hwmon_curr_is_visible(hwmon, attr);
+ ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
break;
case hwmon_in:
- ret = xe_hwmon_in_is_visible(hwmon, attr);
+ ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
break;
case hwmon_energy:
- ret = xe_hwmon_energy_is_visible(hwmon, attr);
+ ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
break;
default:
ret = 0;
break;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
@@ -642,27 +674,27 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
switch (type) {
case hwmon_power:
ret = xe_hwmon_power_read(hwmon, attr, channel, val);
break;
case hwmon_curr:
- ret = xe_hwmon_curr_read(hwmon, attr, val);
+ ret = xe_hwmon_curr_read(hwmon, attr, channel, val);
break;
case hwmon_in:
- ret = xe_hwmon_in_read(hwmon, attr, val);
+ ret = xe_hwmon_in_read(hwmon, attr, channel, val);
break;
case hwmon_energy:
- ret = xe_hwmon_energy_read(hwmon, attr, val);
+ ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
break;
default:
ret = -EOPNOTSUPP;
break;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
@@ -674,29 +706,49 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
switch (type) {
case hwmon_power:
ret = xe_hwmon_power_write(hwmon, attr, channel, val);
break;
case hwmon_curr:
- ret = xe_hwmon_curr_write(hwmon, attr, val);
+ ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
break;
default:
ret = -EOPNOTSUPP;
break;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
+static int xe_hwmon_read_label(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_power:
+ case hwmon_energy:
+ case hwmon_curr:
+ case hwmon_in:
+ if (channel == CHANNEL_CARD)
+ *str = "card";
+ else if (channel == CHANNEL_PKG)
+ *str = "pkg";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct hwmon_ops hwmon_ops = {
.is_visible = xe_hwmon_is_visible,
.read = xe_hwmon_read,
.write = xe_hwmon_write,
+ .read_string = xe_hwmon_read_label,
};
static const struct hwmon_chip_info hwmon_chip_info = {
@@ -710,14 +762,15 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
struct xe_hwmon *hwmon = xe->hwmon;
long energy;
u64 val_sku_unit = 0;
+ int channel;
/*
* The contents of register PKG_POWER_SKU_UNIT do not change,
* so read it once and store the shift values.
*/
- if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
+ if (xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0))) {
xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
- REG_READ32, &val_sku_unit, 0, 0);
+ REG_READ32, &val_sku_unit, 0, 0, 0);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
@@ -727,8 +780,9 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
* Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
* first value of the energy register read
*/
- if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, 0))
- xe_hwmon_energy_get(hwmon, &energy);
+ for (channel = 0; channel < CHANNEL_MAX; channel++)
+ if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
+ xe_hwmon_energy_get(hwmon, channel, &energy);
}
static void xe_hwmon_mutex_destroy(void *arg)
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 2f5d179e0d00..996806353171 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -187,7 +187,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
* GSCCS interrupts, but it has its own mask register.
*/
if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
- gsc_mask = irqs;
+ gsc_mask = irqs | GSC_ER_COMPLETE;
heci_mask = GSC_IRQ_INTF(1);
} else if (HAS_HECI_GSCFI(xe)) {
gsc_mask = GSC_IRQ_INTF(1);
@@ -326,7 +326,6 @@ static void gt_irq_handler(struct xe_tile *tile,
xe_heci_gsc_irq_handler(xe, intr_vec);
else
gt_other_irq_handler(engine_gt, instance, intr_vec);
- continue;
}
}
}
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 0d7c5514e092..418661a88918 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -35,7 +35,7 @@
static bool xe_has_multi_level_lmtt(struct xe_device *xe)
{
- return xe->info.platform == XE_PVC;
+ return GRAPHICS_VERx100(xe) >= 1260;
}
static struct xe_tile *lmtt_to_tile(struct xe_lmtt *lmtt)
@@ -70,8 +70,8 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+ XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 7ad853b0788a..615bbc372ac6 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -5,8 +5,11 @@
#include "xe_lrc.h"
+#include <linux/ascii85.h>
+
#include "instructions/xe_mi_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
+#include "instructions/xe_gfx_state_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gpu_commands.h"
#include "regs/xe_lrc_layout.h"
@@ -23,13 +26,28 @@
#include "xe_sriov.h"
#include "xe_vm.h"
-#define LRC_VALID (1 << 0)
-#define LRC_PRIVILEGE (1 << 8)
-#define LRC_ADDRESSING_MODE_SHIFT 3
+#define LRC_VALID BIT_ULL(0)
+#define LRC_PRIVILEGE BIT_ULL(8)
+#define LRC_ADDRESSING_MODE GENMASK_ULL(4, 3)
#define LRC_LEGACY_64B_CONTEXT 3
-#define ENGINE_CLASS_SHIFT 61
-#define ENGINE_INSTANCE_SHIFT 48
+#define LRC_ENGINE_CLASS GENMASK_ULL(63, 61)
+#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
+
+struct xe_lrc_snapshot {
+ struct xe_bo *lrc_bo;
+ void *lrc_snapshot;
+ unsigned long lrc_size, lrc_offset;
+
+ u32 context_desc;
+ u32 head;
+ struct {
+ u32 internal;
+ u32 memory;
+ } tail;
+ u32 start_seqno;
+ u32 seqno;
+};
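Replacing the *_SHIFT constants above with GENMASK_ULL() masks lets the descriptor bits be assembled with FIELD_PREP(), which checks at compile time that a value fits its field instead of silently spilling into neighbouring bits. A reduced sketch of the pattern, with mask positions copied from the defines above (the DEMO_* names are invented):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define DEMO_VALID              BIT_ULL(0)
    #define DEMO_ADDRESSING_MODE    GENMASK_ULL(4, 3)
    #define DEMO_LEGACY_64B_CONTEXT 3

    static u64 demo_build_desc(void)
    {
            u64 desc = DEMO_VALID;

            /* Old style was DEMO_LEGACY_64B_CONTEXT << 3, shifted by
             * hand; FIELD_PREP derives the shift from the mask itself. */
            desc |= FIELD_PREP(DEMO_ADDRESSING_MODE, DEMO_LEGACY_64B_CONTEXT);
            return desc;
    }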
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
@@ -97,7 +115,6 @@ static void set_offsets(u32 *regs,
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END 0
{
const u32 base = hwe->mmio_base;
@@ -168,7 +185,7 @@ static const u8 gen12_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END
+ 0
};
static const u8 dg2_xcs_offsets[] = {
@@ -202,7 +219,7 @@ static const u8 dg2_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END
+ 0
};
static const u8 gen12_rcs_offsets[] = {
@@ -298,7 +315,7 @@ static const u8 gen12_rcs_offsets[] = {
REG(0x084),
NOP(1),
- END
+ 0
};
static const u8 xehp_rcs_offsets[] = {
@@ -339,7 +356,7 @@ static const u8 xehp_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END
+ 0
};
static const u8 dg2_rcs_offsets[] = {
@@ -382,7 +399,7 @@ static const u8 dg2_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END
+ 0
};
static const u8 mtl_rcs_offsets[] = {
@@ -425,7 +442,7 @@ static const u8 mtl_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END
+ 0
};
#define XE2_CTX_COMMON \
@@ -471,7 +488,7 @@ static const u8 xe2_rcs_offsets[] = {
LRI(1, 0), /* [0x47] */
REG(0x0c8), /* [0x48] R_PWR_CLK_STATE */
- END
+ 0
};
static const u8 xe2_bcs_offsets[] = {
@@ -482,16 +499,15 @@ static const u8 xe2_bcs_offsets[] = {
REG16(0x200), /* [0x42] BCS_SWCTRL */
REG16(0x204), /* [0x44] BLIT_CCTL */
- END
+ 0
};
static const u8 xe2_xcs_offsets[] = {
XE2_CTX_COMMON,
- END
+ 0
};
-#undef END
#undef REG16
#undef REG
#undef LRI
@@ -527,9 +543,8 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
{
- regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH) |
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
/* TODO: Timestamp */
}
@@ -637,7 +652,7 @@ static inline struct iosys_map __xe_lrc_##elem##_map(struct xe_lrc *lrc) \
iosys_map_incr(&map, __xe_lrc_##elem##_offset(lrc)); \
return map; \
} \
-static inline u32 __xe_lrc_##elem##_ggtt_addr(struct xe_lrc *lrc) \
+static inline u32 __maybe_unused __xe_lrc_##elem##_ggtt_addr(struct xe_lrc *lrc) \
{ \
return xe_bo_ggtt_addr(lrc->bo) + __xe_lrc_##elem##_offset(lrc); \
} \
@@ -727,8 +742,9 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
lrc->bo = xe_bo_create_pin_map(xe, tile, vm,
ring_size + xe_lrc_size(xe, hwe->class),
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
@@ -779,7 +795,7 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
lrc->desc = LRC_VALID;
- lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
+ lrc->desc |= FIELD_PREP(LRC_ADDRESSING_MODE, LRC_LEGACY_64B_CONTEXT);
/* TODO: Priority */
/* While this appears to have something about privileged batches or
@@ -789,8 +805,8 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
lrc->desc |= LRC_PRIVILEGE;
if (GRAPHICS_VERx100(xe) < 1250) {
- lrc->desc |= (u64)hwe->instance << ENGINE_INSTANCE_SHIFT;
- lrc->desc |= (u64)hwe->class << ENGINE_CLASS_SHIFT;
+ lrc->desc |= FIELD_PREP(LRC_ENGINE_INSTANCE, hwe->instance);
+ lrc->desc |= FIELD_PREP(LRC_ENGINE_CLASS, hwe->class);
}
arb_enable = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1037,6 +1053,8 @@ static int dump_gfxpipe_command(struct drm_printer *p,
MATCH(GPGPU_CSR_BASE_ADDRESS);
MATCH(STATE_COMPUTE_MODE);
MATCH3D(3DSTATE_BTD);
+ MATCH(STATE_SYSTEM_MEM_FENCE_ADDRESS);
+ MATCH(STATE_CONTEXT_DATA_BASE_ADDRESS);
MATCH3D(3DSTATE_VF_STATISTICS);
@@ -1061,6 +1079,7 @@ static int dump_gfxpipe_command(struct drm_printer *p,
MATCH3D(3DSTATE_WM);
MATCH3D(3DSTATE_CONSTANT_VS);
MATCH3D(3DSTATE_CONSTANT_GS);
+ MATCH3D(3DSTATE_CONSTANT_PS);
MATCH3D(3DSTATE_SAMPLE_MASK);
MATCH3D(3DSTATE_CONSTANT_HS);
MATCH3D(3DSTATE_CONSTANT_DS);
@@ -1153,6 +1172,31 @@ static int dump_gfxpipe_command(struct drm_printer *p,
}
}
+static int dump_gfx_state_command(struct drm_printer *p,
+ struct xe_gt *gt,
+ u32 *dw,
+ int remaining_dw)
+{
+ u32 numdw = instr_dw(*dw);
+ u32 opcode = REG_FIELD_GET(GFX_STATE_OPCODE, *dw);
+
+ /*
+ * Make sure we haven't mis-parsed a number of dwords that exceeds the
+ * remaining size of the LRC.
+ */
+ if (xe_gt_WARN_ON(gt, numdw > remaining_dw))
+ numdw = remaining_dw;
+
+ switch (*dw & (XE_INSTR_GFX_STATE | GFX_STATE_OPCODE)) {
+ MATCH(STATE_WRITE_INLINE);
+
+ default:
+ drm_printf(p, "[%#010x] unknown GFX_STATE command (opcode=%#x), likely %d dwords\n",
+ *dw, opcode, numdw);
+ return numdw;
+ }
+}
+
void xe_lrc_dump_default(struct drm_printer *p,
struct xe_gt *gt,
enum xe_engine_class hwe_class)
@@ -1177,6 +1221,8 @@ void xe_lrc_dump_default(struct drm_printer *p,
num_dw = dump_mi_command(p, gt, dw, remaining_dw);
} else if ((*dw & XE_INSTR_CMD_TYPE) == XE_INSTR_GFXPIPE) {
num_dw = dump_gfxpipe_command(p, gt, dw, remaining_dw);
+ } else if ((*dw & XE_INSTR_CMD_TYPE) == XE_INSTR_GFX_STATE) {
+ num_dw = dump_gfx_state_command(p, gt, dw, remaining_dw);
} else {
num_dw = min(instr_dw(*dw), remaining_dw);
drm_printf(p, "[%#10x] Unknown instruction of type %#x, likely %d dwords\n",
@@ -1300,3 +1346,101 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
bb->len += num_dw;
}
}
+
+struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
+{
+ struct xe_lrc_snapshot *snapshot = kmalloc(sizeof(*snapshot), GFP_NOWAIT);
+
+ if (!snapshot)
+ return NULL;
+
+ snapshot->context_desc = lower_32_bits(xe_lrc_ggtt_addr(lrc));
+ snapshot->head = xe_lrc_ring_head(lrc);
+ snapshot->tail.internal = lrc->ring.tail;
+ snapshot->tail.memory = xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL);
+ snapshot->start_seqno = xe_lrc_start_seqno(lrc);
+ snapshot->seqno = xe_lrc_seqno(lrc);
+ snapshot->lrc_bo = xe_bo_get(lrc->bo);
+ snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
+ snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
+ snapshot->lrc_snapshot = NULL;
+ return snapshot;
+}
+
+void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
+{
+ struct xe_bo *bo;
+ struct iosys_map src;
+
+ if (!snapshot)
+ return;
+
+ bo = snapshot->lrc_bo;
+ snapshot->lrc_bo = NULL;
+
+ snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL);
+ if (!snapshot->lrc_snapshot)
+ goto put_bo;
+
+ dma_resv_lock(bo->ttm.base.resv, NULL);
+ if (!ttm_bo_vmap(&bo->ttm, &src)) {
+ xe_map_memcpy_from(xe_bo_device(bo),
+ snapshot->lrc_snapshot, &src, snapshot->lrc_offset,
+ snapshot->lrc_size);
+ ttm_bo_vunmap(&bo->ttm, &src);
+ } else {
+ kvfree(snapshot->lrc_snapshot);
+ snapshot->lrc_snapshot = NULL;
+ }
+ dma_resv_unlock(bo->ttm.base.resv);
+put_bo:
+ xe_bo_put(bo);
+}
+
+void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
+{
+ unsigned long i;
+
+ if (!snapshot)
+ return;
+
+ drm_printf(p, "\tHW Context Desc: 0x%08x\n", snapshot->context_desc);
+ drm_printf(p, "\tLRC Head: (memory) %u\n", snapshot->head);
+ drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
+ snapshot->tail.internal, snapshot->tail.memory);
+ drm_printf(p, "\tStart seqno: (memory) %d\n", snapshot->start_seqno);
+ drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->seqno);
+
+ if (!snapshot->lrc_snapshot)
+ return;
+
+ drm_printf(p, "\t[HWSP].length: 0x%x\n", LRC_PPHWSP_SIZE);
+ drm_puts(p, "\t[HWSP].data: ");
+ for (i = 0; i < LRC_PPHWSP_SIZE; i += sizeof(u32)) {
+ u32 *val = snapshot->lrc_snapshot + i;
+ char dumped[ASCII85_BUFSZ];
+
+ drm_puts(p, ascii85_encode(*val, dumped));
+ }
+
+ drm_printf(p, "\n\t[HWCTX].length: 0x%lx\n", snapshot->lrc_size - LRC_PPHWSP_SIZE);
+ drm_puts(p, "\t[HWCTX].data: ");
+ for (; i < snapshot->lrc_size; i += sizeof(u32)) {
+ u32 *val = snapshot->lrc_snapshot + i;
+ char dumped[ASCII85_BUFSZ];
+
+ drm_puts(p, ascii85_encode(*val, dumped));
+ }
+ drm_puts(p, "\n");
+}
+
+void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
+{
+ if (!snapshot)
+ return;
+
+ kvfree(snapshot->lrc_snapshot);
+ if (snapshot->lrc_bo)
+ xe_bo_put(snapshot->lrc_bo);
+ kfree(snapshot);
+}
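The snapshot API is deliberately two-phase: xe_lrc_snapshot_capture() must work where sleeping is forbidden, so it uses GFP_NOWAIT and only grabs cheap register/field values plus a reference on the backing BO, while the expensive kvmalloc, dma_resv lock, and vmap copy are deferred to xe_lrc_snapshot_capture_delayed(). A hedged sketch of the intended call order (the wrapper function is hypothetical; real callers split the two phases across contexts):

    /* Hypothetical error-capture flow; only the ordering matters. */
    static void demo_capture(struct xe_lrc *lrc, struct drm_printer *p)
    {
            struct xe_lrc_snapshot *snap;

            snap = xe_lrc_snapshot_capture(lrc);    /* atomic-safe */
            if (!snap)
                    return;

            /* Later, from process context (e.g. a worker): */
            xe_lrc_snapshot_capture_delayed(snap);  /* may sleep */

            xe_lrc_snapshot_print(snap, p);
            xe_lrc_snapshot_free(snap);
    }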
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 28b1d3f404d4..d32fa31faa2c 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -55,4 +55,9 @@ void xe_lrc_dump_default(struct drm_printer *p,
void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb);
+struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc);
+void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot);
+void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p);
+void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 24f20ed66fd1..b716df0dfb4e 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -43,4 +43,6 @@ struct xe_lrc {
struct xe_hw_fence_ctx fence_ctx;
};
+struct xe_lrc_snapshot;
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 76e95535d7f6..95b6e9d7b7db 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -127,10 +127,11 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
/* XXX: convert to managed bo */
bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_CREATE_SYSTEM_BIT |
- XE_BO_CREATE_GGTT_BIT |
- XE_BO_NEEDS_UC |
- XE_BO_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_NEEDS_UC |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index ee1bb938c493..9f6e9b7f11c8 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -16,6 +16,7 @@
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gpu_commands.h"
+#include "regs/xe_gtt_defs.h"
#include "tests/xe_test.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -155,8 +156,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -227,7 +228,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (vm->flags & XE_VM_FLAG_64K && level == 1)
flags = XE_PDE_64K;
- entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
+ entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
XE_PAGE_SIZE, pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
@@ -235,7 +236,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Write PDE's that point to our BO. */
for (i = 0; i < num_entries - num_level; i++) {
- entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
+ entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
@@ -291,7 +292,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
drm_suballoc_manager_init(&m->vm_update_sa,
- (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
+ (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
NUM_VMUSA_UNIT_PER_PAGE, 0);
m->pt_bo = bo;
@@ -490,7 +491,7 @@ static void emit_pte(struct xe_migrate *m,
struct xe_vm *vm = m->q->vm;
u16 pat_index;
u32 ptes;
- u64 ofs = at_pt * XE_PAGE_SIZE;
+ u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
u64 cur_ofs;
/* Indirect access needs compression enabled uncached PAT index */
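The (u64) casts in this file all close the same latent hole: C multiplies two 32-bit operands in 32 bits even when the result is assigned to a u64, so products such as at_pt * XE_PAGE_SIZE silently wrap once they exceed 4 GiB. Widening one operand first forces a 64-bit multiply. A tiny userspace illustration (4 KiB page size assumed):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t at_pt = 0x140000;      /* a little over 2^20 pages */
            uint32_t page = 4096;

            uint64_t wrong = at_pt * page;           /* 32-bit mul, wraps */
            uint64_t right = (uint64_t)at_pt * page; /* widened: correct  */

            printf("wrong=0x%llx right=0x%llx\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }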
@@ -984,7 +985,6 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_res_cursor src_it;
struct ttm_resource *src = dst;
int err;
- int pass = 0;
if (!clear_vram)
xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
@@ -1005,8 +1005,6 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
clear_L0 = xe_migrate_res_sizes(m, &src_it);
- drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
-
/* Calculate final sizes and batch size.. */
batch_size = 2 +
pte_update_size(m, clear_vram, src, &src_it,
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 7ba2477452d7..334637511e75 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -163,6 +163,42 @@ static int xe_determine_lmem_bar_size(struct xe_device *xe)
return 0;
}
+static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u64 offset;
+ u32 reg;
+
+ if (GRAPHICS_VER(xe) >= 20) {
+ u64 ccs_size = tile_size / 512;
+ u64 offset_hi, offset_lo;
+ u32 nodes, num_enabled;
+
+ reg = xe_mmio_read32(gt, MIRROR_FUSE3);
+ nodes = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, reg);
+ num_enabled = hweight32(nodes); /* Number of enabled l3 nodes */
+
+ reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
+ offset_lo = REG_FIELD_GET(XE2_FLAT_CCS_BASE_LOWER_ADDR_MASK, reg);
+
+ reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_UPPER);
+ offset_hi = REG_FIELD_GET(XE2_FLAT_CCS_BASE_UPPER_ADDR_MASK, reg);
+
+ offset = offset_hi << 32; /* HW view bits 39:32 */
+ offset |= offset_lo << 6; /* HW view bits 31:6 */
+ offset *= num_enabled; /* convert to SW view */
+
+ /* We don't expect any holes */
+ xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size),
+ "Hole between CCS and GSM.\n");
+ } else {
+ reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
+ offset = (u64)REG_FIELD_GET(XEHP_FLAT_CCS_PTR, reg) * SZ_64K;
+ }
+
+ return offset;
+}
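On Xe2 the flat-CCS base is stitched together from two fuse-backed register fields (bits 39:32 and 31:6 of the hardware view) and then multiplied by the number of enabled L3 nodes to convert to the software view. A worked userspace example with invented field values (not real fuse readouts):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t offset_hi = 0x01;       /* field holding bits 39:32 */
            uint64_t offset_lo = 0x200000;   /* field holding bits 31:6  */
            unsigned int num_enabled = 4;    /* popcount of L3 node mask */

            uint64_t offset = (offset_hi << 32) | (offset_lo << 6);
            offset *= num_enabled;           /* HW view -> SW view */

            printf("flat CCS offset = 0x%llx\n", (unsigned long long)offset);
            return 0;
    }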
+
/**
* xe_mmio_tile_vram_size() - Collect vram size and offset information
* @tile: tile to get info for
@@ -207,8 +243,7 @@ static int xe_mmio_tile_vram_size(struct xe_tile *tile, u64 *vram_size,
/* minus device usage */
if (xe->info.has_flat_ccs) {
- reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
- offset = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
+ offset = get_flat_ccs_offset(gt, *tile_size);
} else {
offset = xe_mmio_read64_2x32(gt, GSMBASE);
}
@@ -360,32 +395,9 @@ static void mmio_fini(struct drm_device *drm, void *arg)
iounmap(xe->mem.vram.mapping);
}
-static int xe_verify_lmem_ready(struct xe_device *xe)
-{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
-
- if (!IS_DGFX(xe))
- return 0;
-
- if (IS_SRIOV_VF(xe))
- return 0;
-
- /*
- * The boot firmware initializes local memory and assesses its health.
- * If memory training fails, the punit will have been instructed to
- * keep the GT powered down; we won't be able to communicate with it
- * and we should not continue with driver initialization.
- */
- if (!(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) {
- drm_err(&xe->drm, "VRAM not initialized by firmware\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
int xe_mmio_init(struct xe_device *xe)
{
+ struct xe_tile *root_tile = xe_device_get_root_tile(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
const int mmio_bar = 0;
@@ -401,23 +413,83 @@ int xe_mmio_init(struct xe_device *xe)
return -EIO;
}
+ /* Setup first tile; other tiles (if present) will be setup later. */
+ root_tile->mmio.size = SZ_16M;
+ root_tile->mmio.regs = xe->mmio.regs;
+
return drmm_add_action_or_reset(&xe->drm, mmio_fini, xe);
}
-int xe_mmio_root_tile_init(struct xe_device *xe)
+u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{
- struct xe_tile *root_tile = xe_device_get_root_tile(xe);
- int err;
+ struct xe_tile *tile = gt_to_tile(gt);
- /* Setup first tile; other tiles (if present) will be setup later. */
- root_tile->mmio.size = SZ_16M;
- root_tile->mmio.regs = xe->mmio.regs;
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
- err = xe_verify_lmem_ready(xe);
- if (err)
- return err;
+ return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
- return 0;
+u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
+void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
+u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
+u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
+{
+ u32 old, reg_val;
+
+ old = xe_mmio_read32(gt, reg);
+ reg_val = (old & ~clr) | set;
+ xe_mmio_write32(gt, reg, reg_val);
+
+ return old;
+}
+
+int xe_mmio_write32_and_verify(struct xe_gt *gt,
+ struct xe_reg reg, u32 val, u32 mask, u32 eval)
+{
+ u32 reg_val;
+
+ xe_mmio_write32(gt, reg, val);
+ reg_val = xe_mmio_read32(gt, reg);
+
+ return (reg_val & mask) != eval ? -EINVAL : 0;
+}
+
+bool xe_mmio_in_range(const struct xe_gt *gt,
+ const struct xe_mmio_range *range,
+ struct xe_reg reg)
+{
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return range && reg.addr >= range->start && reg.addr <= range->end;
}
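Moving these accessors out of the header trades inlining for a single definition that is easier to instrument later; behavior is unchanged, including the media-GT address adjustment applied before every access. For reference, a short sketch of how the read-modify-write helper is typically used (the register and bit names below are invented for illustration):

    #include <linux/bits.h>

    #define DEMO_CTL_REG        XE_REG(0x1234)  /* hypothetical register */
    #define DEMO_CTL_ENABLE     BIT(0)
    #define DEMO_CTL_MODE_MASK  GENMASK(3, 1)

    static void demo_enable(struct xe_gt *gt)
    {
            /* Clear the mode field and set enable in one RMW; the
             * pre-modification value is returned for logging if needed. */
            u32 old = xe_mmio_rmw32(gt, DEMO_CTL_REG, DEMO_CTL_MODE_MASK,
                                    DEMO_CTL_ENABLE);
            (void)old;
    }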
/**
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 98de5c13c89b..a3cd7b3036c7 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -21,83 +21,15 @@ struct xe_device;
#define LMEM_BAR 2
int xe_mmio_init(struct xe_device *xe);
-int xe_mmio_root_tile_init(struct xe_device *xe);
void xe_mmio_probe_tiles(struct xe_device *xe);
-static inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
-}
-
-static inline u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
-}
-
-static inline void xe_mmio_write32(struct xe_gt *gt,
- struct xe_reg reg, u32 val)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
-}
-
-static inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
-}
-
-static inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
- u32 set)
-{
- u32 old, reg_val;
-
- old = xe_mmio_read32(gt, reg);
- reg_val = (old & ~clr) | set;
- xe_mmio_write32(gt, reg, reg_val);
-
- return old;
-}
-
-static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
- struct xe_reg reg, u32 val,
- u32 mask, u32 eval)
-{
- u32 reg_val;
-
- xe_mmio_write32(gt, reg, val);
- reg_val = xe_mmio_read32(gt, reg);
-
- return (reg_val & mask) != eval ? -EINVAL : 0;
-}
-
-static inline bool xe_mmio_in_range(const struct xe_gt *gt,
- const struct xe_mmio_range *range,
- struct xe_reg reg)
-{
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return range && reg.addr >= range->start && reg.addr <= range->end;
-}
+u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
+u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg);
+void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
+u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
+u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
+int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
+bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
int xe_mmio_probe_vram(struct xe_device *xe);
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 609d997b3e9b..1e92f8ee07ba 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -17,10 +17,10 @@
#include "xe_step_types.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
-#define mocs_dbg drm_dbg
+#define mocs_dbg xe_gt_dbg
#else
__printf(2, 3)
-static inline void mocs_dbg(const struct drm_device *dev,
+static inline void mocs_dbg(const struct xe_gt *gt,
const char *format, ...)
{ /* noop */ }
#endif
@@ -72,7 +72,7 @@ struct xe_mocs_info {
/* Helper defines */
#define XELP_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
#define PVC_NUM_MOCS_ENTRIES 3
-#define MTL_NUM_MOCS_ENTRIES 16
+#define MTL_NUM_MOCS_ENTRIES 16
#define XE2_NUM_MOCS_ENTRIES 16
/* (e)LLC caching options */
@@ -375,6 +375,7 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
switch (xe->info.platform) {
case XE_LUNARLAKE:
+ case XE_BATTLEMAGE:
info->size = ARRAY_SIZE(xe2_mocs_table);
info->table = xe2_mocs_table;
info->n_entries = XE2_NUM_MOCS_ENTRIES;
@@ -401,7 +402,11 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
info->size = ARRAY_SIZE(dg2_mocs_desc);
info->table = dg2_mocs_desc;
info->uc_index = 1;
- info->n_entries = XELP_NUM_MOCS_ENTRIES;
+ /*
+ * The last entry is read-only in hardware, so don't bother
+ * checking what was written to it later.
+ */
+ info->n_entries = XELP_NUM_MOCS_ENTRIES - 1;
info->unused_entries_index = 3;
break;
case XE_DG1:
@@ -462,24 +467,34 @@ static u32 get_entry_control(const struct xe_mocs_info *info,
return info->table[info->unused_entries_index].control_value;
}
-static void __init_mocs_table(struct xe_gt *gt,
- const struct xe_mocs_info *info)
+static bool regs_are_mcr(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ if (xe_gt_is_media_type(gt))
+ return MEDIA_VER(xe) >= 20;
+ else
+ return GRAPHICS_VERx100(xe) >= 1250;
+}
+
+static void __init_mocs_table(struct xe_gt *gt,
+ const struct xe_mocs_info *info)
+{
unsigned int i;
u32 mocs;
- mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
- drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
- "Unused entries index should have been defined\n");
- for (i = 0;
- i < info->n_entries ? (mocs = get_entry_control(info, i)), 1 : 0;
- i++) {
- mocs_dbg(&gt_to_xe(gt)->drm, "GLOB_MOCS[%d] 0x%x 0x%x\n", i,
+ xe_gt_WARN_ONCE(gt, !info->unused_entries_index,
+ "Unused entries index should have been defined\n");
+
+ mocs_dbg(gt, "mocs entries: %d\n", info->n_entries);
+
+ for (i = 0; i < info->n_entries; i++) {
+ mocs = get_entry_control(info, i);
+
+ mocs_dbg(gt, "GLOB_MOCS[%d] 0x%x 0x%x\n", i,
XELP_GLOBAL_MOCS(i).addr, mocs);
- if (GRAPHICS_VERx100(gt_to_xe(gt)) > 1250)
+ if (regs_are_mcr(gt))
xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs);
else
xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs);
@@ -510,16 +525,16 @@ static void init_l3cc_table(struct xe_gt *gt,
unsigned int i;
u32 l3cc;
- mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
- for (i = 0;
- i < (info->n_entries + 1) / 2 ?
- (l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
- get_entry_l3cc(info, 2 * i + 1))), 1 : 0;
- i++) {
- mocs_dbg(&gt_to_xe(gt)->drm, "LNCFCMOCS[%d] 0x%x 0x%x\n", i, XELP_LNCFCMOCS(i).addr,
- l3cc);
+ mocs_dbg(gt, "l3cc entries: %d\n", info->n_entries);
+
+ for (i = 0; i < (info->n_entries + 1) / 2; i++) {
+ l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
+ get_entry_l3cc(info, 2 * i + 1));
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+ mocs_dbg(gt, "LNCFCMOCS[%d] 0x%x 0x%x\n", i,
+ XELP_LNCFCMOCS(i).addr, l3cc);
+
+ if (regs_are_mcr(gt))
xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc);
else
xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc);
@@ -552,7 +567,10 @@ void xe_mocs_init(struct xe_gt *gt)
* performed by the GuC.
*/
flags = get_mocs_settings(gt_to_xe(gt), &table);
- mocs_dbg(&gt_to_xe(gt)->drm, "flag:0x%x\n", flags);
+ mocs_dbg(gt, "flag:0x%x\n", flags);
+
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
if (flags & HAS_GLOBAL_MOCS)
__init_mocs_table(gt, &table);
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 110b69864656..ceb8345cbca6 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -48,6 +48,13 @@ module_param_named_unsafe(force_probe, xe_modparam.force_probe, charp, 0400);
MODULE_PARM_DESC(force_probe,
"Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details.");
+#ifdef CONFIG_PCI_IOV
+module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400);
+MODULE_PARM_DESC(max_vfs,
+ "Limit number of Virtual Functions (VFs) that could be managed. "
+ "(0 = no VFs [default]; N = allow up to N VFs)");
+#endif
+
struct init_funcs {
int (*init)(void);
void (*exit)(void);
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 88ef0e8b2bfd..b369984f08ec 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -18,6 +18,9 @@ struct xe_modparam {
char *huc_firmware_path;
char *gsc_firmware_path;
char *force_probe;
+#ifdef CONFIG_PCI_IOV
+ unsigned int max_vfs;
+#endif
};
extern struct xe_modparam xe_modparam;
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index e148934d554b..d5b516f115ad 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -142,6 +142,7 @@ static const struct xe_pat_table_entry xe2_pat_table[] = {
/* Special PAT values programmed outside the main table */
static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 );
+static const struct xe_pat_table_entry xe2_pat_pta = XE2_PAT( 0, 0, 0, 0, 3, 0 );
u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
{
@@ -174,7 +175,6 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
int i, err;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -192,7 +192,6 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
static const struct xe_pat_ops xelp_pat_ops = {
@@ -205,7 +204,6 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
int i, err;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -225,7 +223,6 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
static const struct xe_pat_ops xehp_pat_ops = {
@@ -238,7 +235,6 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
int i, err;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -256,7 +252,6 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
static const struct xe_pat_ops xehpc_pat_ops = {
@@ -269,7 +264,6 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
int i, err;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -292,7 +286,6 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
/*
@@ -310,6 +303,9 @@ static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry
{
program_pat_mcr(gt, table, n_entries);
xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value);
+
+ if (IS_DGFX(gt_to_xe(gt)))
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe2_pat_pta.value);
}
static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
@@ -317,6 +313,9 @@ static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry
{
program_pat(gt, table, n_entries);
xe_mmio_write32(gt, XE_REG(_PAT_ATS), xe2_pat_ats.value);
+
+ if (IS_DGFX(gt_to_xe(gt)))
+ xe_mmio_write32(gt, XE_REG(_PAT_PTA), xe2_pat_pta.value);
}
static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
@@ -325,7 +324,6 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
int i, err;
u32 pat;
- xe_device_mem_access_get(xe);
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
goto err_fw;
@@ -370,7 +368,6 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_fw:
xe_assert(xe, !err);
- xe_device_mem_access_put(xe);
}
static const struct xe_pat_ops xe2_pat_ops = {
@@ -438,6 +435,10 @@ void xe_pat_init_early(struct xe_device *xe)
/* VFs can't program nor dump PAT settings */
if (IS_SRIOV_VF(xe))
xe->pat.ops = NULL;
+
+ xe_assert(xe, !xe->pat.ops || xe->pat.ops->dump);
+ xe_assert(xe, !xe->pat.ops || xe->pat.ops->program_graphics);
+ xe_assert(xe, !xe->pat.ops || MEDIA_VER(xe) < 13 || xe->pat.ops->program_media);
}
void xe_pat_init(struct xe_gt *gt)
@@ -457,7 +458,7 @@ void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- if (!xe->pat.ops->dump)
+ if (!xe->pat.ops)
return;
xe->pat.ops->dump(gt, p);
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 557f2d88a8c1..f326dbb1cecd 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -174,7 +174,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
static const struct xe_graphics_desc graphics_xe2 = {
- .name = "Xe2_LPG",
+ .name = "Xe2_LPG / Xe2_HPG",
XE2_GFX_FEATURES,
};
@@ -185,8 +185,8 @@ static const struct xe_media_desc media_xem = {
.rel = 0,
.hw_engine_mask =
- BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
- BIT(XE_HW_ENGINE_VECS0),
+ GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
+ GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};
static const struct xe_media_desc media_xehpm = {
@@ -195,21 +195,23 @@ static const struct xe_media_desc media_xehpm = {
.rel = 55,
.hw_engine_mask =
- BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
- BIT(XE_HW_ENGINE_VECS0) | BIT(XE_HW_ENGINE_VECS1),
+ GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
+ GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};
static const struct xe_media_desc media_xelpmp = {
.name = "Xe_LPM+",
.hw_engine_mask =
- BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
- BIT(XE_HW_ENGINE_VECS0) | BIT(XE_HW_ENGINE_GSCCS0)
+ GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
+ GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
+ BIT(XE_HW_ENGINE_GSCCS0)
};
static const struct xe_media_desc media_xe2 = {
- .name = "Xe2_LPM",
+ .name = "Xe2_LPM / Xe2_HPM",
.hw_engine_mask =
- BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VECS0), /* TODO: GSC0 */
+ GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
+ GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0), /* TODO: GSC0 */
};
static const struct xe_device_desc tgl_desc = {
@@ -333,6 +335,13 @@ static const struct xe_device_desc mtl_desc = {
static const struct xe_device_desc lnl_desc = {
PLATFORM(XE_LUNARLAKE),
+ .has_display = true,
+ .require_force_probe = true,
+};
+
+static const struct xe_device_desc bmg_desc __maybe_unused = {
+ DGFX_FEATURES,
+ PLATFORM(XE_BATTLEMAGE),
.require_force_probe = true,
};
@@ -343,12 +352,15 @@ __diag_pop();
static const struct gmdid_map graphics_ip_map[] = {
{ 1270, &graphics_xelpg },
{ 1271, &graphics_xelpg },
+ { 1274, &graphics_xelpg }, /* Xe_LPG+ */
+ { 2001, &graphics_xe2 },
{ 2004, &graphics_xe2 },
};
/* Map of GMD_ID values to media IP */
static const struct gmdid_map media_ip_map[] = {
{ 1300, &media_xelpmp },
+ { 1301, &media_xe2 },
{ 2000, &media_xe2 },
};
@@ -737,8 +749,6 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- xe_sriov_probe_early(xe, desc->has_sriov);
-
err = xe_device_probe_early(xe);
if (err)
return err;
@@ -774,18 +784,26 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
str_yes_no(xe_device_has_sriov(xe)),
xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
- xe_pm_init_early(xe);
+ err = xe_pm_init_early(xe);
+ if (err)
+ return err;
err = xe_device_probe(xe);
if (err)
return err;
- xe_pm_init(xe);
+ err = xe_pm_init(xe);
+ if (err)
+ goto err_driver_cleanup;
drm_dbg(&xe->drm, "d3cold: capable=%s\n",
str_yes_no(xe->d3cold.capable));
return 0;
+
+err_driver_cleanup:
+ xe_pci_remove(pdev);
+ return err;
}
static void xe_pci_shutdown(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index b324dc2a5deb..c010ef16fbf5 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
+#include "xe_device.h"
#include "xe_gt.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
@@ -43,8 +44,6 @@ static int pcode_mailbox_status(struct xe_gt *gt)
[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
};
- lockdep_assert_held(&gt->pcode.lock);
-
err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
if (err) {
drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
@@ -55,17 +54,15 @@ static int pcode_mailbox_status(struct xe_gt *gt)
return 0;
}
-static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
- unsigned int timeout_ms, bool return_data,
- bool atomic)
+static int __pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+ unsigned int timeout_ms, bool return_data,
+ bool atomic)
{
int err;
if (gt_to_xe(gt)->info.skip_pcode)
return 0;
- lockdep_assert_held(&gt->pcode.lock);
-
if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
return -EAGAIN;
@@ -74,7 +71,7 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
err = xe_mmio_wait32(gt, PCODE_MAILBOX, PCODE_READY, 0,
- timeout_ms * 1000, NULL, atomic);
+ timeout_ms * USEC_PER_MSEC, NULL, atomic);
if (err)
return err;
@@ -87,6 +84,18 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
return pcode_mailbox_status(gt);
}
+static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+ unsigned int timeout_ms, bool return_data,
+ bool atomic)
+{
+ if (gt_to_xe(gt)->info.skip_pcode)
+ return 0;
+
+ lockdep_assert_held(&gt->pcode.lock);
+
+ return __pcode_mailbox_rw(gt, mbox, data0, data1, timeout_ms, return_data, atomic);
+}
+
int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
{
int err;
@@ -109,15 +118,19 @@ int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
return err;
}
-static int xe_pcode_try_request(struct xe_gt *gt, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- u32 *status, bool atomic, int timeout_us)
+static int pcode_try_request(struct xe_gt *gt, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status, bool atomic, int timeout_us, bool locked)
{
int slept, wait = 10;
for (slept = 0; slept < timeout_us; slept += wait) {
- *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
- atomic);
+ if (locked)
+ *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ atomic);
+ else
+ *status = __pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ atomic);
if ((*status == 0) && ((request & reply_mask) == reply))
return 0;
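pcode_try_request() is a poll-with-backoff loop: fire the mailbox transaction, check both that it succeeded and that the masked reply matches, then sleep a growing (capped) interval until the timeout budget runs out. A generic standalone sketch of that shape (try_once() is a hypothetical stand-in for the mailbox call):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/minmax.h>

    static int poll_with_backoff(int (*try_once)(void), int timeout_us)
    {
            int slept, wait = 10;

            for (slept = 0; slept < timeout_us; slept += wait) {
                    if (try_once() == 0)
                            return 0;

                    usleep_range(wait, wait * 2);
                    wait = min(wait * 2, 1000);     /* cap the backoff */
            }
            return -ETIMEDOUT;
    }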
@@ -158,8 +171,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
mutex_lock(&gt->pcode.lock);
- ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
- false, timeout_base_ms * 1000);
+ ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ false, timeout_base_ms * 1000, true);
if (!ret)
goto out;
@@ -177,8 +190,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
"PCODE timeout, retrying with preemption disabled\n");
drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
preempt_disable();
- ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
- true, timeout_base_ms * 1000);
+ ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ true, timeout_base_ms * 1000, true);
preempt_enable();
out:
@@ -238,59 +251,71 @@ unlock:
}
/**
- * xe_pcode_init - Ensure PCODE is initialized
- * @gt: gt instance
+ * xe_pcode_ready - Ensure PCODE is initialized
+ * @xe: xe instance
+ * @locked: true to take the pcode mutex around the poll, false during early
+ *	probe before locks are initialized
*
- * This function ensures that PCODE is properly initialized. To be called during
- * probe and resume paths.
+ * The PCODE init mailbox is polled only on the root GT of the root tile,
+ * as the root tile reports that initialization is complete only after all
+ * tiles have finished initializing.
+ * Called without locks during early probe, and with locks held on the
+ * resume path.
*
- * It returns 0 on success, and -error number on failure.
+ * Returns 0 on success, and a negative error code on failure.
*/
-int xe_pcode_init(struct xe_gt *gt)
+int xe_pcode_ready(struct xe_device *xe, bool locked)
{
u32 status, request = DGFX_GET_INIT_STATUS;
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
int timeout_us = 180000000; /* 3 min */
int ret;
- if (gt_to_xe(gt)->info.skip_pcode)
+ if (xe->info.skip_pcode)
return 0;
- if (!IS_DGFX(gt_to_xe(gt)))
+ if (!IS_DGFX(xe))
return 0;
- mutex_lock(&gt->pcode.lock);
- ret = xe_pcode_try_request(gt, DGFX_PCODE_STATUS, request,
- DGFX_INIT_STATUS_COMPLETE,
- DGFX_INIT_STATUS_COMPLETE,
- &status, false, timeout_us);
- mutex_unlock(&gt->pcode.lock);
+ if (locked)
+ mutex_lock(&gt->pcode.lock);
+
+ ret = pcode_try_request(gt, DGFX_PCODE_STATUS, request,
+ DGFX_INIT_STATUS_COMPLETE,
+ DGFX_INIT_STATUS_COMPLETE,
+ &status, false, timeout_us, locked);
+
+ if (locked)
+ mutex_unlock(&gt->pcode.lock);
if (ret)
- drm_err(&gt_to_xe(gt)->drm,
+ drm_err(&xe->drm,
"PCODE initialization timedout after: 3 min\n");
return ret;
}
/**
- * xe_pcode_probe - Prepare xe_pcode and also ensure PCODE is initialized.
+ * xe_pcode_init - initialize components of PCODE
* @gt: gt instance
*
- * This function initializes the xe_pcode component, and when needed, it ensures
- * that PCODE has properly performed its initialization and it is really ready
- * to go. To be called once only during probe.
- *
- * It returns 0 on success, and -error number on failure.
+ * This function initializes the xe_pcode component.
+ * To be called once only during probe.
*/
-int xe_pcode_probe(struct xe_gt *gt)
+void xe_pcode_init(struct xe_gt *gt)
{
drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->pcode.lock);
+}
- if (gt_to_xe(gt)->info.skip_pcode)
- return 0;
-
- if (!IS_DGFX(gt_to_xe(gt)))
- return 0;
-
- return xe_pcode_init(gt);
+/**
+ * xe_pcode_probe_early - initializes PCODE
+ * @xe: xe instance
+ *
+ * This function checks the initialization status of PCODE.
+ * To be called once only during early probe, without locks.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int xe_pcode_probe_early(struct xe_device *xe)
+{
+ return xe_pcode_ready(xe, false);
}
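
A minimal sketch of how the two entry points above are intended to be used
(the call sites below are illustrative, not lines from this patch):

	/* Early probe: gt->pcode.lock does not exist yet, so poll unlocked. */
	err = xe_pcode_probe_early(xe);
	if (err)
		return err;

	/* Resume: locks are initialized, take gt->pcode.lock around the poll. */
	err = xe_pcode_ready(xe, true);
	if (err)
		return err;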
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index 08cb1d047cba..3f54c6d2a57d 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -8,9 +8,11 @@
#include <linux/types.h>
struct xe_gt;
+struct xe_device;
-int xe_pcode_probe(struct xe_gt *gt);
-int xe_pcode_init(struct xe_gt *gt);
+void xe_pcode_init(struct xe_gt *gt);
+int xe_pcode_probe_early(struct xe_device *xe);
+int xe_pcode_ready(struct xe_device *xe, bool locked);
int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
u32 max_gt_freq);
int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1);
diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h
index 553f53dbd093..79b7042c4534 100644
--- a/drivers/gpu/drm/xe/xe_platform_types.h
+++ b/drivers/gpu/drm/xe/xe_platform_types.h
@@ -22,6 +22,7 @@ enum xe_platform {
XE_PVC,
XE_METEORLAKE,
XE_LUNARLAKE,
+ XE_BATTLEMAGE,
};
enum xe_subplatform {
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 53b3b0b019ac..37fbeda12d3b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -25,23 +25,55 @@
/**
* DOC: Xe Power Management
*
- * Xe PM shall be guided by the simplicity.
- * Use the simplest hook options whenever possible.
- * Let's not reinvent the runtime_pm references and hooks.
- * Shall have a clear separation of display and gt underneath this component.
+ * Xe PM implements the main routines for both system level suspend states and
+ * for the opportunistic runtime suspend states.
*
- * What's next:
+ * System Level Suspend (S-States) - In general this is OS initiated suspend
+ * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to ram),
+ * S4 (disk). The main functions here are `xe_pm_suspend` and `xe_pm_resume`,
+ * the entry points for suspending to and resuming from these states.
*
- * For now s2idle and s3 are only working in integrated devices. The next step
- * is to iterate through all VRAM's BO backing them up into the system memory
- * before allowing the system suspend.
+ * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
+ * state D3, controlled by the PCI subsystem and ACPI, with help from the
+ * runtime_pm infrastructure.
+ * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep
+ * memory alive for a quicker, low-latency resume, or D3cold, where Vcc power
+ * is off for better power savings.
+ * Vcc for the PCI hierarchy can only be controlled at the PCI root port
+ * level, while the device driver can be behind multiple bridges/switches and
+ * paired with other devices. For this reason, the PCI subsystem cannot perform
+ * the transition towards D3cold; the lowest runtime PM state possible from the
+ * PCI subsystem is D3hot. Then, once all the paired devices in the same root
+ * port are in D3hot, ACPI assists and runs its own methods (_PR3 and _OFF)
+ * to perform the transition from D3hot to D3cold. Xe may disallow this
+ * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
+ * suspend, based on runtime conditions such as VRAM usage, to keep resume
+ * quick and low latency.
*
- * Also runtime_pm needs to be here from the beginning.
+ * Runtime PM - This infrastructure provided by the Linux kernel allows
+ * device drivers to indicate when they can be runtime suspended, so the
+ * device can be put into D3 (if supported), or allow deeper package sleep
+ * states (PC-states) and/or other low level power states. The Xe PM component
+ * provides `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that
+ * the PCI subsystem calls on the transition to/from runtime suspend.
*
- * RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
- * and no wait boost. Frequency optimizations should come on a next stage.
+ * Also, Xe PM provides get and put functions that the Xe driver uses to
+ * indicate activity. In order to avoid locking complications with the memory
+ * management, whenever possible these get and put functions need to be
+ * called from the higher/outer levels.
+ * The main cases that need to be protected from the outer levels are: IOCTL,
+ * sysfs, debugfs, dma-buf sharing, GPU execution.
+ *
+ * This component is not responsible for GT idleness (RC6) nor GT frequency
+ * management (RPS).
*/
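
A minimal sketch of the outer-level protection described above (hypothetical
ioctl handler; xe_foo_ioctl and do_foo are illustrative names, not part of
this patch):

	static int xe_foo_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
	{
		struct xe_device *xe = to_xe_device(dev);
		int ret;

		/* Wake the device and take the reference at the outer level */
		ret = xe_pm_runtime_get_ioctl(xe);
		if (ret >= 0)
			ret = do_foo(xe, data);

		/* Always put: the usage counter is bumped even on failure */
		xe_pm_runtime_put(xe);
		return ret;
	}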
+#ifdef CONFIG_LOCKDEP
+struct lockdep_map xe_pm_runtime_lockdep_map = {
+ .name = "xe_pm_runtime_lockdep_map"
+};
+#endif
+
/**
* xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
* @xe: xe device instance
@@ -54,13 +86,15 @@ int xe_pm_suspend(struct xe_device *xe)
u8 id;
int err;
+ drm_dbg(&xe->drm, "Suspending device\n");
+
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
if (err)
- return err;
+ goto err;
xe_display_pm_suspend(xe);
@@ -68,7 +102,7 @@ int xe_pm_suspend(struct xe_device *xe)
err = xe_gt_suspend(gt);
if (err) {
xe_display_pm_resume(xe);
- return err;
+ goto err;
}
}
@@ -76,7 +110,11 @@ int xe_pm_suspend(struct xe_device *xe)
xe_display_pm_suspend_late(xe);
+ drm_dbg(&xe->drm, "Device suspended\n");
return 0;
+err:
+ drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
+ return err;
}
/**
@@ -92,14 +130,14 @@ int xe_pm_resume(struct xe_device *xe)
u8 id;
int err;
+ drm_dbg(&xe->drm, "Resuming device\n");
+
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
- for_each_gt(gt, xe, id) {
- err = xe_pcode_init(gt);
- if (err)
- return err;
- }
+ err = xe_pcode_ready(xe, true);
+ if (err)
+ return err;
xe_display_pm_resume_early(xe);
@@ -109,7 +147,7 @@ int xe_pm_resume(struct xe_device *xe)
*/
err = xe_bo_restore_kernel(xe);
if (err)
- return err;
+ goto err;
xe_irq_resume(xe);
@@ -120,9 +158,13 @@ int xe_pm_resume(struct xe_device *xe)
err = xe_bo_restore_user(xe);
if (err)
- return err;
+ goto err;
+ drm_dbg(&xe->drm, "Device resumed\n");
return 0;
+err:
+ drm_dbg(&xe->drm, "Device resume failed %d\n", err);
+ return err;
}
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
@@ -172,30 +214,60 @@ static void xe_pm_runtime_init(struct xe_device *xe)
pm_runtime_put(dev);
}
-void xe_pm_init_early(struct xe_device *xe)
+int xe_pm_init_early(struct xe_device *xe)
{
+ int err;
+
INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
- drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
+
+ err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
+ if (err)
+ return err;
+
+ err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
+ if (err)
+ return err;
+
+ return 0;
}
-void xe_pm_init(struct xe_device *xe)
+/**
+ * xe_pm_init - Initialize Xe Power Management
+ * @xe: xe device instance
+ *
+ * This component is responsible for System and Device sleep states.
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_pm_init(struct xe_device *xe)
{
+ int err;
+
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
- return;
-
- drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
+ return 0;
xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
if (xe->d3cold.capable) {
- xe_device_sysfs_init(xe);
- xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ err = xe_device_sysfs_init(xe);
+ if (err)
+ return err;
+
+ err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ if (err)
+ return err;
}
xe_pm_runtime_init(xe);
+
+ return 0;
}
+/**
+ * xe_pm_runtime_fini - Finalize Runtime PM
+ * @xe: xe device instance
+ */
void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -225,6 +297,28 @@ struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
return READ_ONCE(xe->pm_callback_task);
}
+/**
+ * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
+ * @xe: xe device instance
+ *
+ * This does not provide any guarantee that the device is going to remain
+ * suspended as it might be racing with the runtime state transitions.
+ * It can be used only as an unreliable assertion, to ensure that we are not in
+ * the sleep state while trying to access some memory for instance.
+ *
+ * Returns true if PCI device is suspended, false otherwise.
+ */
+bool xe_pm_runtime_suspended(struct xe_device *xe)
+{
+ return pm_runtime_suspended(xe->drm.dev);
+}
+
+/**
+ * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
+ * @xe: xe device instance
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
struct xe_bo *bo, *on;
@@ -232,18 +326,15 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
u8 id;
int err = 0;
- if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
- return -EBUSY;
-
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
/*
- * The actual xe_device_mem_access_put() is always async underneath, so
+ * The actual xe_pm_runtime_put() is always async underneath, so
* exactly where that is called should makes no difference to us. However
* we still need to be very careful with the locks that this callback
* acquires and the locks that are acquired and held by any callers of
- * xe_device_mem_access_get(). We already have the matching annotation
+ * xe_runtime_pm_get(). We already have the matching annotation
* on that side, but we also need it here. For example lockdep should be
* able to tell us if the following scenario is in theory possible:
*
@@ -251,15 +342,15 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
* lock(A) |
* | xe_pm_runtime_suspend()
* | lock(A)
- * xe_device_mem_access_get() |
+ * xe_pm_runtime_get() |
*
* This will clearly deadlock since rpm core needs to wait for
* xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
* on CPU0 which prevents CPU1 making forward progress. With the
- * annotation here and in xe_device_mem_access_get() lockdep will see
+ * annotation here and in xe_pm_runtime_get() lockdep will see
* the potential lock inversion and give us a nice splat.
*/
- lock_map_acquire(&xe_device_mem_access_lockdep_map);
+ lock_map_acquire(&xe_pm_runtime_lockdep_map);
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
@@ -285,11 +376,17 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_irq_suspend(xe);
out:
- lock_map_release(&xe_device_mem_access_lockdep_map);
+ lock_map_release(&xe_pm_runtime_lockdep_map);
xe_pm_write_callback_task(xe, NULL);
return err;
}
+/**
+ * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
+ * @xe: xe device instance
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
int xe_pm_runtime_resume(struct xe_device *xe)
{
struct xe_gt *gt;
@@ -299,7 +396,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
- lock_map_acquire(&xe_device_mem_access_lockdep_map);
+ lock_map_acquire(&xe_pm_runtime_lockdep_map);
/*
* It can be possible that xe has allowed d3cold but other pcie devices
@@ -310,11 +407,9 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);
if (xe->d3cold.allowed && xe->d3cold.power_lost) {
- for_each_gt(gt, xe, id) {
- err = xe_pcode_init(gt);
- if (err)
- goto out;
- }
+ err = xe_pcode_ready(xe, true);
+ if (err)
+ goto out;
/*
* This only restores pinned memory which is the memory
@@ -336,27 +431,147 @@ int xe_pm_runtime_resume(struct xe_device *xe)
goto out;
}
out:
- lock_map_release(&xe_device_mem_access_lockdep_map);
+ lock_map_release(&xe_pm_runtime_lockdep_map);
xe_pm_write_callback_task(xe, NULL);
return err;
}
-int xe_pm_runtime_get(struct xe_device *xe)
+/*
+ * For places where resume is synchronous it can be quite easy to deadlock
+ * if we are not careful. Also in practice it might be quite timing
+ * sensitive to ever see the 0 -> 1 transition with the callers locks
+ * held, so deadlocks might exist but are hard for lockdep to ever see.
+ * With this in mind, help lockdep learn about the potentially scary
+ * stuff that can happen inside the runtime_resume callback by acquiring
+ * a dummy lock (it doesn't protect anything and gets compiled out on
+ * non-debug builds). Lockdep then only needs to see the
+ * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can
+ * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map.
+ * For example if the (callers_locks) are ever grabbed in the
+ * runtime_resume callback, lockdep should give us a nice splat.
+ */
+static void pm_runtime_lockdep_prime(void)
+{
+ lock_map_acquire(&xe_pm_runtime_lockdep_map);
+ lock_map_release(&xe_pm_runtime_lockdep_map);
+}
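
(To illustrate the priming above with a hypothetical caller lock, not from
this patch: if some path does

	mutex_lock(&vm->lock);
	xe_pm_runtime_get(xe);

then lockdep records vm->lock -> xe_pm_runtime_lockdep_map and will splat if
the runtime_resume callback is ever seen taking vm->lock.)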
+
+/**
+ * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
+ * @xe: xe device instance
+ */
+void xe_pm_runtime_get(struct xe_device *xe)
{
- return pm_runtime_get_sync(xe->drm.dev);
+ pm_runtime_get_noresume(xe->drm.dev);
+
+ if (xe_pm_read_callback_task(xe) == current)
+ return;
+
+ pm_runtime_lockdep_prime();
+ pm_runtime_resume(xe->drm.dev);
}
-int xe_pm_runtime_put(struct xe_device *xe)
+/**
+ * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
+ * @xe: xe device instance
+ */
+void xe_pm_runtime_put(struct xe_device *xe)
{
- pm_runtime_mark_last_busy(xe->drm.dev);
- return pm_runtime_put(xe->drm.dev);
+ if (xe_pm_read_callback_task(xe) == current) {
+ pm_runtime_put_noidle(xe->drm.dev);
+ } else {
+ pm_runtime_mark_last_busy(xe->drm.dev);
+ pm_runtime_put(xe->drm.dev);
+ }
}
+/**
+ * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
+ * @xe: xe device instance
+ *
+ * Returns: Any number greater than or equal to 0 for success, negative error
+ * code otherwise.
+ */
+int xe_pm_runtime_get_ioctl(struct xe_device *xe)
+{
+ if (WARN_ON(xe_pm_read_callback_task(xe) == current))
+ return -ELOOP;
+
+ pm_runtime_lockdep_prime();
+ return pm_runtime_get_sync(xe->drm.dev);
+}
+
+/**
+ * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
+ * @xe: xe device instance
+ *
+ * Returns: 1 if the reference was taken, 0 if the device was not active,
+ * negative error code otherwise.
+ */
int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
return pm_runtime_get_if_active(xe->drm.dev);
}
+/**
+ * xe_pm_runtime_get_if_in_use - Get a runtime_pm reference and resume if needed
+ * @xe: xe device instance
+ *
+ * Returns: True if device is awake and the reference was taken, false otherwise.
+ */
+bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
+{
+ if (xe_pm_read_callback_task(xe) == current) {
+ /* The device is awake, grab the ref and move on */
+ pm_runtime_get_noresume(xe->drm.dev);
+ return true;
+ }
+
+ return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
+}
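
A sketch of the intended use of the conditional get above, e.g. in a sysfs
read that must not wake a sleeping device (show_foo, read_foo_from_hw and the
drvdata layout are assumptions for illustration):

	static ssize_t show_foo(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		struct xe_device *xe = dev_get_drvdata(dev);
		u32 val = 0;

		/* Only touch the HW if it is already awake */
		if (xe_pm_runtime_get_if_in_use(xe)) {
			val = read_foo_from_hw(xe);
			xe_pm_runtime_put(xe);
		}

		return sysfs_emit(buf, "%u\n", val);
	}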
+
+/**
+ * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
+ * @xe: xe device instance
+ *
+ * This function should be used in inner places where it is surely already
+ * protected by outer-bound callers of `xe_pm_runtime_get`.
+ * It will warn if not protected.
+ * The reference should always be put back after this function, since the
+ * usage counter is bumped regardless.
+ */
+void xe_pm_runtime_get_noresume(struct xe_device *xe)
+{
+ bool ref;
+
+ ref = xe_pm_runtime_get_if_in_use(xe);
+
+ if (drm_WARN(&xe->drm, !ref, "Missing outer runtime PM protection\n"))
+ pm_runtime_get_noresume(xe->drm.dev);
+}
+
+/**
+ * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
+ * @xe: xe device instance
+ *
+ * Returns: True if device is awake and the reference was taken, false otherwise.
+ */
+bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
+{
+ if (xe_pm_read_callback_task(xe) == current) {
+ /* The device is awake, grab the ref and move on */
+ pm_runtime_get_noresume(xe->drm.dev);
+ return true;
+ }
+
+ pm_runtime_lockdep_prime();
+ return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
+}
+
+/**
+ * xe_pm_assert_unbounded_bridge - Disable PM on unbounded pcie parent bridge
+ * @xe: xe device instance
+ */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -371,6 +586,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
}
}
+/**
+ * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
+ * @xe: xe device instance
+ * @threshold: VRAM size in bytes for the D3cold threshold
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
struct ttm_resource_manager *man;
@@ -395,6 +617,13 @@ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
return 0;
}
+/**
+ * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
+ * @xe: xe device instance
+ *
+ * To be called during the runtime_pm idle callback.
+ * Checks all the D3cold conditions ahead of runtime suspend.
+ */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
struct ttm_resource_manager *man;
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 64a97c6726a7..18b0613fe57b 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -20,14 +20,19 @@ struct xe_device;
int xe_pm_suspend(struct xe_device *xe);
int xe_pm_resume(struct xe_device *xe);
-void xe_pm_init_early(struct xe_device *xe);
-void xe_pm_init(struct xe_device *xe);
+int xe_pm_init_early(struct xe_device *xe);
+int xe_pm_init(struct xe_device *xe);
void xe_pm_runtime_fini(struct xe_device *xe);
+bool xe_pm_runtime_suspended(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
int xe_pm_runtime_resume(struct xe_device *xe);
-int xe_pm_runtime_get(struct xe_device *xe);
-int xe_pm_runtime_put(struct xe_device *xe);
+void xe_pm_runtime_get(struct xe_device *xe);
+int xe_pm_runtime_get_ioctl(struct xe_device *xe);
+void xe_pm_runtime_put(struct xe_device *xe);
int xe_pm_runtime_get_if_active(struct xe_device *xe);
+bool xe_pm_runtime_get_if_in_use(struct xe_device *xe);
+void xe_pm_runtime_get_noresume(struct xe_device *xe);
+bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 7bce2a332603..7d50c6e89d8e 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -49,7 +49,7 @@ static bool preempt_fence_enable_signaling(struct dma_fence *fence)
struct xe_exec_queue *q = pfence->q;
pfence->error = q->ops->suspend(q);
- queue_work(system_unbound_wq, &pfence->preempt_work);
+ queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
return true;
}
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 7f54bc3e389d..5b7930f46cf3 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -5,6 +5,7 @@
#include "xe_pt.h"
+#include "regs/xe_gtt_defs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
@@ -108,11 +109,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
- XE_BO_CREATE_PINNED_BIT |
- XE_BO_CREATE_NO_RESV_EVICT |
- XE_BO_PAGETABLE);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_NO_RESV_EVICT |
+ XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
@@ -618,7 +619,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
int ret;
- if (vma && (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) &&
+ if ((vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) &&
(is_devmem || !IS_DGFX(xe)))
xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
@@ -1135,8 +1136,7 @@ static int invalidation_fence_init(struct xe_gt *gt,
spin_lock_irq(&gt->tlb_invalidation.lock);
dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
&gt->tlb_invalidation.lock,
- gt->tlb_invalidation.fence_context,
- ++gt->tlb_invalidation.fence_seqno);
+ dma_fence_context_alloc(1), 1);
spin_unlock_irq(&gt->tlb_invalidation.lock);
INIT_LIST_HEAD(&ifence->base.link);
@@ -1236,6 +1236,13 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
if (err)
goto err;
+
+ err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
+ if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
+ if (err)
+ goto err;
+
xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
@@ -1254,11 +1261,13 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
* non-faulting LR, in particular on user-space batch buffer chaining,
* it needs to be done here.
*/
- if ((rebind && !xe_vm_in_lr_mode(vm) && !vm->batch_invalidate_tlb) ||
- (!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
+ if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
if (!ifence)
return ERR_PTR(-ENOMEM);
+ } else if (rebind && !xe_vm_in_lr_mode(vm)) {
+ /* We also bump if batch_invalidate_tlb is true */
+ vm->tlb_flush_seqno++;
}
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
@@ -1297,7 +1306,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
}
/* add shared fence now for pagetable delayed destroy */
- dma_resv_add_fence(xe_vm_resv(vm), fence, !rebind &&
+ dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
last_munmap_rebind ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
@@ -1576,6 +1585,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
struct dma_fence *fence = NULL;
struct invalidation_fence *ifence;
struct xe_range_fence *rfence;
+ int err;
LLIST_HEAD(deferred);
@@ -1593,6 +1603,12 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
num_entries);
+ err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
+ if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
+ if (err)
+ return ERR_PTR(err);
+
ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
if (!ifence)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 92bb06c0586e..df407d73e5f5 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -12,6 +12,7 @@
#include <drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
@@ -132,7 +133,7 @@ query_engine_cycles(struct xe_device *xe,
return -EINVAL;
eci = &resp.eci;
- if (eci->gt_id > XE_MAX_GT_PER_TILE)
+ if (eci->gt_id >= XE_MAX_GT_PER_TILE)
return -EINVAL;
gt = xe_device_get_gt(xe, eci->gt_id);
@@ -147,8 +148,8 @@ query_engine_cycles(struct xe_device *xe,
if (!hwe)
return -EINVAL;
- xe_device_mem_access_get(xe);
- xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL))
+ return -EIO;
__read_timestamps(gt,
RING_TIMESTAMP(hwe->mmio_base),
@@ -159,7 +160,6 @@ query_engine_cycles(struct xe_device *xe,
cpu_clock);
xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_device_mem_access_put(xe);
resp.width = 36;
/* Only write to the output fields of user query */
@@ -403,6 +403,13 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
BIT(gt_to_tile(gt)->id) << 1;
gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
gt_list->gt_list[id].near_mem_regions;
+
+ gt_list->gt_list[id].ip_ver_major =
+ REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
+ gt_list->gt_list[id].ip_ver_minor =
+ REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
+ gt_list->gt_list[id].ip_ver_rev =
+ REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);
}
if (copy_to_user(query_ptr, gt_list, size)) {
@@ -433,9 +440,7 @@ static int query_hwconfig(struct xe_device *xe,
if (!hwconfig)
return -ENOMEM;
- xe_device_mem_access_get(xe);
xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);
- xe_device_mem_access_put(xe);
if (copy_to_user(query_ptr, hwconfig, size)) {
kfree(hwconfig);
@@ -544,14 +549,44 @@ query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
break;
}
+ case XE_QUERY_UC_TYPE_HUC: {
+ struct xe_gt *media_gt = NULL;
+ struct xe_huc *huc;
+
+ if (MEDIA_VER(xe) >= 13) {
+ struct xe_tile *tile;
+ u8 gt_id;
+
+ for_each_tile(tile, xe, gt_id) {
+ if (tile->media_gt) {
+ media_gt = tile->media_gt;
+ break;
+ }
+ }
+ } else {
+ media_gt = xe->tiles[0].primary_gt;
+ }
+
+ if (!media_gt)
+ break;
+
+ huc = &media_gt->uc.huc;
+ if (huc->fw.status == XE_UC_FIRMWARE_RUNNING)
+ version = &huc->fw.versions.found[XE_UC_FW_VER_RELEASE];
+ break;
+ }
default:
return -EINVAL;
}
- resp.branch_ver = 0;
- resp.major_ver = version->major;
- resp.minor_ver = version->minor;
- resp.patch_ver = version->patch;
+ if (version) {
+ resp.branch_ver = 0;
+ resp.major_ver = version->major;
+ resp.minor_ver = version->minor;
+ resp.patch_ver = version->patch;
+ } else {
+ return -ENODEV;
+ }
if (copy_to_user(query_ptr, &resp, size))
return -EFAULT;
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index c4edffcd4a32..d42b3f33bd7a 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -17,6 +17,7 @@
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_sched_job.h"
+#include "xe_sriov.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
#include "xe_wa.h"
@@ -219,10 +220,9 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
- struct xe_vm *vm = job->q->vm;
struct xe_gt *gt = job->q->gt;
- if (vm && vm->batch_invalidate_tlb) {
+ if (job->ring_ops_flush_tlb) {
dw[i++] = preparser_disable(true);
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, true, dw, i);
@@ -270,7 +270,6 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
struct xe_gt *gt = job->q->gt;
struct xe_device *xe = gt_to_xe(gt);
bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
- struct xe_vm *vm = job->q->vm;
dw[i++] = preparser_disable(true);
@@ -282,13 +281,13 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
}
- if (vm && vm->batch_invalidate_tlb)
+ if (job->ring_ops_flush_tlb)
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, true, dw, i);
dw[i++] = preparser_disable(false);
- if (!vm || !vm->batch_invalidate_tlb)
+ if (!job->ring_ops_flush_tlb)
i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, dw, i);
@@ -317,7 +316,6 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
struct xe_gt *gt = job->q->gt;
struct xe_device *xe = gt_to_xe(gt);
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
- struct xe_vm *vm = job->q->vm;
u32 mask_flags = 0;
dw[i++] = preparser_disable(true);
@@ -327,7 +325,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;
/* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
- i = emit_pipe_invalidate(mask_flags, vm && vm->batch_invalidate_tlb, dw, i);
+ i = emit_pipe_invalidate(mask_flags, job->ring_ops_flush_tlb, dw, i);
/* hsdes: 1809175790 */
if (has_aux_ccs(xe))
@@ -370,10 +368,12 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
i = emit_bb_start(job->batch_addr[0], BIT(8), dw, i);
- /* XXX: Do we need this? Leaving for now. */
- dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(0, dw, i);
- dw[i++] = preparser_disable(false);
+ if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
+ /* XXX: Do we need this? Leaving for now. */
+ dw[i++] = preparser_disable(true);
+ i = emit_flush_invalidate(0, dw, i);
+ dw[i++] = preparser_disable(false);
+ }
i = emit_bb_start(job->batch_addr[1], BIT(8), dw, i);
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 2c4632259edd..8941522b7705 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -48,8 +48,9 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
sa_manager->bo = NULL;
bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
PTR_ERR(bo));
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 8151ddafb940..cd8a2fba5438 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -5,6 +5,7 @@
#include "xe_sched_job.h"
+#include <drm/xe_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>
@@ -15,6 +16,8 @@
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
+#include "xe_pm.h"
+#include "xe_sync_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
@@ -157,7 +160,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
/* All other jobs require a VM to be open which has a ref */
if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
- xe_device_mem_access_get(job_to_xe(job));
+ xe_pm_runtime_get_noresume(job_to_xe(job));
xe_device_assert_mem_access(job_to_xe(job));
trace_xe_sched_job_create(job);
@@ -190,7 +193,7 @@ void xe_sched_job_destroy(struct kref *ref)
container_of(ref, struct xe_sched_job, refcount);
if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
- xe_device_mem_access_put(job_to_xe(job));
+ xe_pm_runtime_put(job_to_xe(job));
xe_exec_queue_put(job->q);
dma_fence_put(job->fence);
drm_sched_job_cleanup(&job->drm);
@@ -250,6 +253,16 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
void xe_sched_job_arm(struct xe_sched_job *job)
{
+ struct xe_exec_queue *q = job->q;
+ struct xe_vm *vm = q->vm;
+
+ if (vm && !xe_sched_job_is_migration(q) && !xe_vm_in_lr_mode(vm) &&
+ (vm->batch_invalidate_tlb || vm->tlb_flush_seqno != q->tlb_flush_seqno)) {
+ xe_vm_assert_held(vm);
+ q->tlb_flush_seqno = vm->tlb_flush_seqno;
+ job->ring_ops_flush_tlb = true;
+ }
+
drm_sched_job_arm(&job->drm);
}
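
As a worked example of the seqno handshake above: with vm->tlb_flush_seqno at
3 and q->tlb_flush_seqno at 2 (a rebind happened since this queue last armed a
job), the armed job gets ring_ops_flush_tlb set and the queue catches up to 3;
the next job on the same queue then skips the flush, unless another rebind
bumps the VM seqno or vm->batch_invalidate_tlb forces it on every job.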
@@ -278,6 +291,22 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
return drm_sched_job_add_dependency(&job->drm, fence);
}
+/**
+ * xe_sched_job_init_user_fence - Initialize user_fence for the job
+ * @job: job whose user_fence needs an init
+ * @sync: sync to be used to init the user_fence
+ */
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+ struct xe_sync_entry *sync)
+{
+ if (sync->type != DRM_XE_SYNC_TYPE_USER_FENCE)
+ return;
+
+ job->user_fence.used = true;
+ job->user_fence.addr = sync->addr;
+ job->user_fence.value = sync->timeline_value;
+}
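
A sketch of the intended call site in the submission path (num_syncs and
syncs are the parsed sync entries; the names are illustrative):

	for (i = 0; i < num_syncs; i++)
		xe_sched_job_init_user_fence(job, &syncs[i]);

Entries that are not DRM_XE_SYNC_TYPE_USER_FENCE return early, so the loop is
safe for mixed sync types.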
+
struct xe_sched_job_snapshot *
xe_sched_job_snapshot_capture(struct xe_sched_job *job)
{
diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
index f1a660648cf0..c75018f4660d 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.h
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -10,6 +10,7 @@
struct drm_printer;
struct xe_vm;
+struct xe_sync_entry;
#define XE_SCHED_HANG_LIMIT 1
#define XE_SCHED_JOB_TIMEOUT LONG_MAX
@@ -58,6 +59,8 @@ void xe_sched_job_arm(struct xe_sched_job *job);
void xe_sched_job_push(struct xe_sched_job *job);
int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+ struct xe_sync_entry *sync);
static inline struct xe_sched_job *
to_xe_sched_job(struct drm_sched_job *drm)
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index b1d83da50a53..5e12724219fd 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -39,6 +39,8 @@ struct xe_sched_job {
} user_fence;
/** @migrate_flush_flags: Additional flush flags for migration jobs */
u32 migrate_flush_flags;
+ /** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
+ bool ring_ops_flush_tlb;
/** @batch_addr: batch buffer address of job */
u64 batch_addr[];
};
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index f295d91886b1..1c3fa84b6adb 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -5,8 +5,13 @@
#include <drm/drm_managed.h>
+#include "regs/xe_sriov_regs.h"
+
#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_mmio.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
/**
* xe_sriov_mode_to_string - Convert enum value to string.
@@ -28,10 +33,16 @@ const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode)
}
}
+static bool test_is_vf(struct xe_device *xe)
+{
+ u32 value = xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG);
+
+ return value & VF_CAP;
+}
+
/**
* xe_sriov_probe_early - Probe a SR-IOV mode.
* @xe: the &xe_device to probe mode on
- * @has_sriov: flag indicating hardware support for SR-IOV
*
* This function should be called only once and as soon as possible during
* driver probe to detect whether we are running a SR-IOV Physical Function
@@ -40,12 +51,17 @@ const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode)
* SR-IOV PF mode detection is based on PCI @dev_is_pf() function.
* SR-IOV VF mode detection is based on dedicated MMIO register read.
*/
-void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov)
+void xe_sriov_probe_early(struct xe_device *xe)
{
enum xe_sriov_mode mode = XE_SRIOV_MODE_NONE;
+ bool has_sriov = xe->info.has_sriov;
- /* TODO: replace with proper mode detection */
- xe_assert(xe, !has_sriov);
+ if (has_sriov) {
+ if (test_is_vf(xe))
+ mode = XE_SRIOV_MODE_VF;
+ else if (xe_sriov_pf_readiness(xe))
+ mode = XE_SRIOV_MODE_PF;
+ }
xe_assert(xe, !xe->sriov.__mode);
xe->sriov.__mode = mode;
@@ -78,6 +94,13 @@ int xe_sriov_init(struct xe_device *xe)
if (!IS_SRIOV(xe))
return 0;
+ if (IS_SRIOV_PF(xe)) {
+ int err = xe_sriov_pf_init_early(xe);
+
+ if (err)
+ return err;
+ }
+
xe_assert(xe, !xe->sriov.wq);
xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
if (!xe->sriov.wq)
@@ -85,3 +108,34 @@ int xe_sriov_init(struct xe_device *xe)
return drmm_add_action_or_reset(&xe->drm, fini_sriov, xe);
}
+
+/**
+ * xe_sriov_print_info - Print basic SR-IOV information.
+ * @xe: the &xe_device to print info from
+ * @p: the &drm_printer
+ *
+ * Print SR-IOV related information into provided DRM printer.
+ */
+void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p)
+{
+ drm_printf(p, "supported: %s\n", str_yes_no(xe_device_has_sriov(xe)));
+ drm_printf(p, "enabled: %s\n", str_yes_no(IS_SRIOV(xe)));
+ drm_printf(p, "mode: %s\n", xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+}
+
+/**
+ * xe_sriov_function_name() - Get SR-IOV Function name.
+ * @n: the Function number (identifier) to get name of
+ * @buf: the buffer to format to
+ * @size: size of the buffer (shall be at least 5 bytes)
+ *
+ * Return: formatted function name ("PF" or "VF%u").
+ */
+const char *xe_sriov_function_name(unsigned int n, char *buf, size_t size)
+{
+ if (n)
+ snprintf(buf, size, "VF%u", n);
+ else
+ strscpy(buf, "PF", size);
+ return buf;
+}
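
Usage sketch (vfid and the message are illustrative):

	char buf[8];

	xe_sriov_dbg(xe, "provisioning %s\n",
		     xe_sriov_function_name(vfid, buf, sizeof(buf)));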
diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h
index 1545552162c9..486bb21c3256 100644
--- a/drivers/gpu/drm/xe/xe_sriov.h
+++ b/drivers/gpu/drm/xe/xe_sriov.h
@@ -10,9 +10,13 @@
#include "xe_device_types.h"
#include "xe_sriov_types.h"
+struct drm_printer;
+
const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode);
+const char *xe_sriov_function_name(unsigned int n, char *buf, size_t len);
-void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov);
+void xe_sriov_probe_early(struct xe_device *xe);
+void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p);
int xe_sriov_init(struct xe_device *xe);
static inline enum xe_sriov_mode xe_device_sriov_mode(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c
new file mode 100644
index 000000000000..0f721ae17b26
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_module.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
+#include "xe_sriov_printk.h"
+
+static unsigned int wanted_max_vfs(struct xe_device *xe)
+{
+ return xe_modparam.max_vfs;
+}
+
+static int pf_reduce_totalvfs(struct xe_device *xe, int limit)
+{
+ struct device *dev = xe->drm.dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int err;
+
+ err = pci_sriov_set_totalvfs(pdev, limit);
+ if (err)
+ xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n",
+ limit, ERR_PTR(err));
+ return err;
+}
+
+static bool pf_continue_as_native(struct xe_device *xe, const char *why)
+{
+ xe_sriov_dbg(xe, "%s, continuing as native\n", why);
+ pf_reduce_totalvfs(xe, 0);
+ return false;
+}
+
+/**
+ * xe_sriov_pf_readiness - Check if PF functionality can be enabled.
+ * @xe: the &xe_device to check
+ *
+ * This function is called as part of the SR-IOV probe to validate if all
+ * PF prerequisites are satisfied and we can continue with enabling PF mode.
+ *
+ * Return: true if the PF mode can be turned on.
+ */
+bool xe_sriov_pf_readiness(struct xe_device *xe)
+{
+ struct device *dev = xe->drm.dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ int newlimit = min_t(u16, wanted_max_vfs(xe), totalvfs);
+
+ xe_assert(xe, totalvfs <= U16_MAX);
+
+ if (!dev_is_pf(dev))
+ return false;
+
+ if (!xe_device_uc_enabled(xe))
+ return pf_continue_as_native(xe, "Guc submission disabled");
+
+ if (!newlimit)
+ return pf_continue_as_native(xe, "all VFs disabled");
+
+ pf_reduce_totalvfs(xe, newlimit);
+
+ xe->sriov.pf.device_total_vfs = totalvfs;
+ xe->sriov.pf.driver_max_vfs = newlimit;
+
+ return true;
+}
+
+/**
+ * xe_sriov_pf_init_early - Initialize SR-IOV PF specific data.
+ * @xe: the &xe_device to initialize
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_init_early(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ return drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
+}
+
+/**
+ * xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information.
+ * @xe: the &xe_device to print info from
+ * @p: the &drm_printer
+ *
+ * Print SR-IOV PF related information into provided DRM printer.
+ */
+void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs);
+ drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
+ drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h
new file mode 100644
index 000000000000..d1220e70e1c0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_H_
+#define _XE_SRIOV_PF_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_device;
+
+#ifdef CONFIG_PCI_IOV
+bool xe_sriov_pf_readiness(struct xe_device *xe);
+int xe_sriov_pf_init_early(struct xe_device *xe);
+void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
+#else
+static inline bool xe_sriov_pf_readiness(struct xe_device *xe)
+{
+ return false;
+}
+
+static inline int xe_sriov_pf_init_early(struct xe_device *xe)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
new file mode 100644
index 000000000000..7d156ba82479
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_HELPERS_H_
+#define _XE_SRIOV_PF_HELPERS_H_
+
+#include "xe_assert.h"
+#include "xe_device_types.h"
+#include "xe_sriov.h"
+#include "xe_sriov_types.h"
+
+/**
+ * xe_sriov_pf_assert_vfid() - warn if &id is not a supported VF number when debugging.
+ * @xe: the PF &xe_device to assert on
+ * @vfid: the VF number to assert
+ *
+ * Assert that &xe represents the Physical Function (PF) device and provided &vfid
+ * is within a range of supported VF numbers (up to maximum number of VFs that
+ * driver can support, including VF0 that represents the PF itself).
+ *
+ * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ */
+#define xe_sriov_pf_assert_vfid(xe, vfid) \
+ xe_assert((xe), (vfid) <= xe_sriov_pf_get_totalvfs(xe))
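
For example, a PF-only helper that takes a VF number would typically open with
(pf_config_foo is an illustrative name):

	static void pf_config_foo(struct xe_device *xe, unsigned int vfid)
	{
		xe_sriov_pf_assert_vfid(xe, vfid);	/* no-op on non-debug builds */

		/* per-VF configuration goes here */
	}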
+
+/**
+ * xe_sriov_pf_get_totalvfs() - Get maximum number of VFs that driver can support.
+ * @xe: the &xe_device to query (shall be PF)
+ *
+ * Return: Maximum number of VFs that this PF driver supports.
+ */
+static inline int xe_sriov_pf_get_totalvfs(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ return xe->sriov.pf.driver_max_vfs;
+}
+
+static inline struct mutex *xe_sriov_pf_master_mutex(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ return &xe->sriov.pf.master_lock;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index 1a138108d139..c7b7ad4af5c8 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -7,6 +7,8 @@
#define _XE_SRIOV_TYPES_H_
#include <linux/build_bug.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
/**
* VFID - Virtual Function Identifier
@@ -37,4 +39,21 @@ enum xe_sriov_mode {
};
static_assert(XE_SRIOV_MODE_NONE);
+/**
+ * struct xe_device_pf - Xe PF related data
+ *
+ * The data in this structure is valid only if driver is running in the
+ * @XE_SRIOV_MODE_PF mode.
+ */
+struct xe_device_pf {
+ /** @device_total_vfs: Maximum number of VFs supported by the device. */
+ u16 device_total_vfs;
+
+ /** @driver_max_vfs: Maximum number of VFs supported by the driver. */
+ u16 driver_max_vfs;
+
+ /** @master_lock: protects all VFs configurations across GTs */
+ struct mutex master_lock;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 02c9577fe418..65f1f1628235 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -224,8 +224,7 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
return 0;
}
-void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
- struct dma_fence *fence)
+void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
{
if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
return;
@@ -254,10 +253,6 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
user_fence_put(sync->ufence);
dma_fence_put(fence);
}
- } else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
- job->user_fence.used = true;
- job->user_fence.addr = sync->addr;
- job->user_fence.value = sync->timeline_value;
}
}
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 0fd0d51208e6..3e03396af2c6 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -26,7 +26,6 @@ int xe_sync_entry_wait(struct xe_sync_entry *sync);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);
void xe_sync_entry_signal(struct xe_sync_entry *sync,
- struct xe_sched_job *job,
struct dma_fence *fence);
void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
struct dma_fence *
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 0650b2fa75ef..15ea0a942f67 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -160,24 +160,19 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
{
int err;
- xe_device_mem_access_get(tile_to_xe(tile));
-
err = tile_ttm_mgr_init(tile);
if (err)
- goto err_mem_access;
+ return err;
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
- if (IS_ERR(tile->mem.kernel_bb_pool)) {
- err = PTR_ERR(tile->mem.kernel_bb_pool);
- goto err_mem_access;
- }
+ if (IS_ERR(tile->mem.kernel_bb_pool))
+ return PTR_ERR(tile->mem.kernel_bb_pool);
+
xe_wa_apply_tile_workarounds(tile);
- xe_tile_sysfs_init(tile);
+ err = xe_tile_sysfs_init(tile);
-err_mem_access:
- xe_device_mem_access_put(tile_to_xe(tile));
- return err;
+ return err;
}
void xe_tile_migrate_wait(struct xe_tile *tile)
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index 0662968d7bcb..64661403afcd 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -7,6 +7,7 @@
#include <linux/sysfs.h>
#include <drm/drm_managed.h>
+#include "xe_pm.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
#include "xe_vram_freq.h"
@@ -28,7 +29,7 @@ static void tile_sysfs_fini(struct drm_device *drm, void *arg)
kobject_put(tile->sysfs);
}
-void xe_tile_sysfs_init(struct xe_tile *tile)
+int xe_tile_sysfs_init(struct xe_tile *tile)
{
struct xe_device *xe = tile_to_xe(tile);
struct device *dev = xe->drm.dev;
@@ -37,7 +38,7 @@ void xe_tile_sysfs_init(struct xe_tile *tile)
kt = kzalloc(sizeof(*kt), GFP_KERNEL);
if (!kt)
- return;
+ return -ENOMEM;
kobject_init(&kt->base, &xe_tile_sysfs_kobj_type);
kt->tile = tile;
@@ -45,16 +46,14 @@ void xe_tile_sysfs_init(struct xe_tile *tile)
err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
if (err) {
kobject_put(&kt->base);
- drm_warn(&xe->drm, "failed to register TILE sysfs directory, err: %d\n", err);
- return;
+ return err;
}
tile->sysfs = &kt->base;
- xe_vram_freq_sysfs_init(tile);
-
- err = drmm_add_action_or_reset(&xe->drm, tile_sysfs_fini, tile);
+ err = xe_vram_freq_sysfs_init(tile);
if (err)
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return err;
+
+ return drmm_add_action_or_reset(&xe->drm, tile_sysfs_fini, tile);
}
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.h b/drivers/gpu/drm/xe/xe_tile_sysfs.h
index e4f065039eba..54a2ba8ba533 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.h
@@ -8,7 +8,7 @@
#include "xe_tile_sysfs_types.h"
-void xe_tile_sysfs_init(struct xe_tile *tile);
+int xe_tile_sysfs_init(struct xe_tile *tile);
static inline struct xe_tile *
kobj_to_tile(struct kobject *kobj)
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 846f14507d5f..2d56cfc09e42 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -258,7 +258,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__field(u32, guc_state)
__field(u32, flags)
__field(int, error)
- __field(u64, fence)
+ __field(struct dma_fence *, fence)
__field(u64, batch_addr)
),
@@ -269,11 +269,11 @@ DECLARE_EVENT_CLASS(xe_sched_job,
atomic_read(&job->q->guc->state);
__entry->flags = job->q->flags;
__entry->error = job->fence->error;
- __entry->fence = (unsigned long)job->fence;
+ __entry->fence = job->fence;
__entry->batch_addr = (u64)job->batch_addr[0];
),
- TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
+ TP_printk("fence=%p, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
__entry->fence, __entry->seqno, __entry->guc_id,
__entry->batch_addr, __entry->guc_state,
__entry->flags, __entry->error)
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index 3107d2a12426..f77367329760 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -204,9 +204,14 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
{
struct xe_ttm_stolen_mgr *mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- u64 stolen_size, io_size, pgsize;
+ u64 stolen_size, io_size;
int err;
+ if (!mgr) {
+ drm_dbg_kms(&xe->drm, "Stolen mgr init failed\n");
+ return;
+ }
+
if (IS_SRIOV_VF(xe))
stolen_size = 0;
else if (IS_DGFX(xe))
@@ -221,10 +226,6 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
return;
}
- pgsize = xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
- if (pgsize < PAGE_SIZE)
- pgsize = PAGE_SIZE;
-
/*
* We don't try to attempt partial visible support for stolen vram,
* since stolen is always at the end of vram, and the BAR size is pretty
@@ -235,7 +236,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
io_size = stolen_size;
err = __xe_ttm_vram_mgr_init(xe, &mgr->base, XE_PL_STOLEN, stolen_size,
- io_size, pgsize);
+ io_size, PAGE_SIZE);
if (err) {
drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err);
return;
@@ -298,7 +299,7 @@ static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe,
XE_WARN_ON(IS_DGFX(xe));
/* XXX: Require BO to be mapped to GGTT? */
- if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT)))
+ if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_GGTT)))
return -EIO;
/* GGTT is always contiguously mapped */
diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
index 3e1fa0c832ca..9844a8edbfe1 100644
--- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
@@ -73,7 +73,10 @@ static void xe_ttm_sys_mgr_del(struct ttm_resource_manager *man,
static void xe_ttm_sys_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
-
+ /*
+ * This function is called from a debugfs entry and would require
+ * pm_runtime_{get,put} wrappers around any operation.
+ */
}
static const struct ttm_resource_manager_func xe_ttm_sys_mgr_func = {
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 115ec745e502..fe3779fdba2c 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -91,7 +91,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
min_page_size = mgr->default_page_size;
if (tbo->page_alignment)
- min_page_size = tbo->page_alignment << PAGE_SHIFT;
+ min_page_size = (u64)tbo->page_alignment << PAGE_SHIFT;
if (WARN_ON(min_page_size < mm->chunk_size)) {
err = -EINVAL;
@@ -196,7 +196,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
return 0;
error_free_blocks:
- drm_buddy_free_list(mm, &vres->blocks);
+ drm_buddy_free_list(mm, &vres->blocks, 0);
mutex_unlock(&mgr->lock);
error_fini:
ttm_resource_fini(man, &vres->base);
@@ -214,7 +214,7 @@ static void xe_ttm_vram_mgr_del(struct ttm_resource_manager *man,
struct drm_buddy *mm = &mgr->mm;
mutex_lock(&mgr->lock);
- drm_buddy_free_list(mm, &vres->blocks);
+ drm_buddy_free_list(mm, &vres->blocks, 0);
mgr->visible_avail += vres->used_visible_size;
mutex_unlock(&mgr->lock);
@@ -478,3 +478,15 @@ void xe_ttm_vram_get_used(struct ttm_resource_manager *man,
*used_visible = mgr->visible_size - mgr->visible_avail;
mutex_unlock(&mgr->lock);
}
+
+u64 xe_ttm_vram_get_avail(struct ttm_resource_manager *man)
+{
+ struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
+ u64 avail;
+
+ mutex_lock(&mgr->lock);
+ avail = mgr->mm.avail;
+ mutex_unlock(&mgr->lock);
+
+ return avail;
+}
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
index d184e19a9230..cc76050e376d 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
@@ -25,6 +25,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
void xe_ttm_vram_mgr_free_sgt(struct device *dev, enum dma_data_direction dir,
struct sg_table *sgt);
+u64 xe_ttm_vram_get_avail(struct ttm_resource_manager *man);
u64 xe_ttm_vram_get_cpu_visible_size(struct ttm_resource_manager *man);
void xe_ttm_vram_get_used(struct ttm_resource_manager *man,
u64 *used, u64 *used_visible);
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 5c83c75bc497..d4e6fa918942 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -28,7 +28,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
/* Xe2 */
{ XE_RTP_NAME("Tuning: L3 cache"),
- XE_RTP_RULES(GRAPHICS_VERSION(2004)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(FIELD_SET(XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
@@ -38,11 +38,11 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
{ XE_RTP_NAME("Tuning: Compression Overfetch"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX)),
},
{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
},
{}
@@ -50,7 +50,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
static const struct xe_rtp_entry_sr engine_tunings[] = {
{ XE_RTP_NAME("Tuning: Set Indirect State Override"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1271),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1274),
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
@@ -88,7 +88,7 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
/* Xe_LPG */
{ XE_RTP_NAME("Tuning: L3 cache"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(FIELD_SET(XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 7033f8c1b431..4feb35c95a1c 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -32,11 +32,8 @@ uc_to_xe(struct xe_uc *uc)
/* Should be called once at driver load only */
int xe_uc_init(struct xe_uc *uc)
{
- struct xe_device *xe = uc_to_xe(uc);
int ret;
- xe_device_mem_access_get(xe);
-
/*
* We call the GuC/HuC/GSC init functions even if GuC submission is off
* to correctly move our tracking of the FW state to "disabled".
@@ -65,16 +62,8 @@ int xe_uc_init(struct xe_uc *uc)
goto err;
ret = xe_guc_db_mgr_init(&uc->guc.dbm, ~0);
- if (ret)
- goto err;
-
- xe_device_mem_access_put(xe);
-
- return 0;
err:
- xe_device_mem_access_put(xe);
-
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_uc_debugfs.c b/drivers/gpu/drm/xe/xe_uc_debugfs.c
index 0a39ec5a6e99..78eb8db73791 100644
--- a/drivers/gpu/drm/xe/xe_uc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_uc_debugfs.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_debugfs.h>
#include "xe_gt.h"
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index a9d25b3fa67c..186f81640cef 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -17,6 +17,7 @@
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_module.h"
+#include "xe_sriov.h"
#include "xe_uc_fw.h"
/*
@@ -296,36 +297,28 @@ static void uc_fw_fini(struct drm_device *drm, void *arg)
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_SELECTED);
}
-static void guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
+static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
{
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
struct xe_uc_fw_version *release = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
struct xe_uc_fw_version *compatibility = &uc_fw->versions.found[XE_UC_FW_VER_COMPATIBILITY];
xe_gt_assert(gt, uc_fw->type == XE_UC_FW_TYPE_GUC);
- xe_gt_assert(gt, release->major >= 70);
-
- if (release->major > 70 || release->minor >= 6) {
- /* v70.6.0 adds CSS header support */
- compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
- css->submission_version);
- compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
- css->submission_version);
- compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH,
- css->submission_version);
- } else if (release->minor >= 3) {
- /* v70.3.0 introduced v1.1.0 */
- compatibility->major = 1;
- compatibility->minor = 1;
- compatibility->patch = 0;
- } else {
- /* v70.0.0 introduced v1.0.0 */
- compatibility->major = 1;
- compatibility->minor = 0;
- compatibility->patch = 0;
+
+ /* We don't support GuC releases older than 70.19 */
+ if (release->major < 70 || (release->major == 70 && release->minor < 19)) {
+ xe_gt_err(gt, "Unsupported GuC v%u.%u! v70.19 or newer is required\n",
+ release->major, release->minor);
+ return -EINVAL;
}
+ compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->submission_version);
+ compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->submission_version);
+ compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->submission_version);
+
uc_fw->private_data_size = css->private_data_size;
+
+ return 0;
}
int xe_uc_fw_check_version_requirements(struct xe_uc_fw *uc_fw)
@@ -424,7 +417,7 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->sw_version);
if (uc_fw->type == XE_UC_FW_TYPE_GUC)
- guc_read_css_info(uc_fw, css);
+ return guc_read_css_info(uc_fw, css);
return 0;
}
@@ -658,7 +651,17 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
xe_assert(xe, !uc_fw->path);
uc_fw_auto_select(xe, uc_fw);
+
+ if (IS_SRIOV_VF(xe)) {
+ /* VFs support only firmware that the driver can autoselect */
+ xe_uc_fw_change_status(uc_fw, uc_fw->path ?
+ XE_UC_FIRMWARE_PRELOADED :
+ XE_UC_FIRMWARE_NOT_SUPPORTED);
+ return 0;
+ }
+
uc_fw_override(uc_fw);
+
xe_uc_fw_change_status(uc_fw, uc_fw->path ?
XE_UC_FIRMWARE_SELECTED :
XE_UC_FIRMWARE_NOT_SUPPORTED);
@@ -771,7 +774,8 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
return 0;
err = uc_fw_copy(uc_fw, fw->data, fw->size,
- XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT);
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
uc_fw_release(fw);
@@ -787,7 +791,8 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
- u32 src_offset, dma_ctrl;
+ u64 src_offset;
+ u32 dma_ctrl;
int ret;
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.h b/drivers/gpu/drm/xe/xe_uc_fw.h
index 85c20795d1f8..35078038797e 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw.h
@@ -59,6 +59,8 @@ const char *xe_uc_fw_status_repr(enum xe_uc_fw_status status)
return "TRANSFERRED";
case XE_UC_FIRMWARE_RUNNING:
return "RUNNING";
+ case XE_UC_FIRMWARE_PRELOADED:
+ return "PRELOADED";
}
return "<invalid>";
}
@@ -85,6 +87,7 @@ static inline int xe_uc_fw_status_to_error(enum xe_uc_fw_status status)
case XE_UC_FIRMWARE_LOADABLE:
case XE_UC_FIRMWARE_TRANSFERRED:
case XE_UC_FIRMWARE_RUNNING:
+ case XE_UC_FIRMWARE_PRELOADED:
return 0;
}
return -EINVAL;
@@ -134,7 +137,8 @@ static inline bool xe_uc_fw_is_available(struct xe_uc_fw *uc_fw)
static inline bool xe_uc_fw_is_loadable(struct xe_uc_fw *uc_fw)
{
- return __xe_uc_fw_status(uc_fw) >= XE_UC_FIRMWARE_LOADABLE;
+ return __xe_uc_fw_status(uc_fw) >= XE_UC_FIRMWARE_LOADABLE &&
+ __xe_uc_fw_status(uc_fw) != XE_UC_FIRMWARE_PRELOADED;
}
static inline bool xe_uc_fw_is_loaded(struct xe_uc_fw *uc_fw)
@@ -144,7 +148,7 @@ static inline bool xe_uc_fw_is_loaded(struct xe_uc_fw *uc_fw)
static inline bool xe_uc_fw_is_running(struct xe_uc_fw *uc_fw)
{
- return __xe_uc_fw_status(uc_fw) == XE_UC_FIRMWARE_RUNNING;
+ return __xe_uc_fw_status(uc_fw) >= XE_UC_FIRMWARE_RUNNING;
}
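/*
 * Illustrative consequence of the status ordering for PF-preloaded
 * firmware on a VF (uc_fw_vf_example is a hypothetical name):
 */
static void uc_fw_vf_example(struct xe_uc_fw *uc_fw)
{
	if (__xe_uc_fw_status(uc_fw) == XE_UC_FIRMWARE_PRELOADED) {
		/* PRELOADED > RUNNING, so the firmware reports as running... */
		WARN_ON(!xe_uc_fw_is_running(uc_fw));
		/* ...but is excluded from the loadable range, so the VF
		 * skips the DMA load path entirely.
		 */
		WARN_ON(xe_uc_fw_is_loadable(uc_fw));
	}
}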
static inline bool xe_uc_fw_is_overridden(const struct xe_uc_fw *uc_fw)
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index bc800b696866..0d8caa0e7354 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -50,7 +50,8 @@ enum xe_uc_fw_status {
XE_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
XE_UC_FIRMWARE_LOAD_FAIL, /* failed to xfer or init/auth the fw */
XE_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
- XE_UC_FIRMWARE_RUNNING /* init/auth done */
+ XE_UC_FIRMWARE_RUNNING, /* init/auth done */
+ XE_UC_FIRMWARE_PRELOADED, /* preloaded by the PF driver */
};
enum xe_uc_fw_type {
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index f88faef4142b..4aa3943e6f29 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -21,12 +21,12 @@
#include <generated/xe_wa_oob.h>
+#include "regs/xe_gtt_defs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
-#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
@@ -38,6 +38,7 @@
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_wa.h"
+#include "xe_hmm.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
@@ -65,113 +66,14 @@ int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
- struct xe_userptr *userptr = &uvma->userptr;
struct xe_vma *vma = &uvma->vma;
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_device *xe = vm->xe;
- const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
- struct page **pages;
- bool in_kthread = !current->mm;
- unsigned long notifier_seq;
- int pinned, ret, i;
- bool read_only = xe_vma_read_only(vma);
lockdep_assert_held(&vm->lock);
xe_assert(xe, xe_vma_is_userptr(vma));
-retry:
- if (vma->gpuva.flags & XE_VMA_DESTROYED)
- return 0;
-
- notifier_seq = mmu_interval_read_begin(&userptr->notifier);
- if (notifier_seq == userptr->notifier_seq)
- return 0;
-
- pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- if (userptr->sg) {
- dma_unmap_sgtable(xe->drm.dev,
- userptr->sg,
- read_only ? DMA_TO_DEVICE :
- DMA_BIDIRECTIONAL, 0);
- sg_free_table(userptr->sg);
- userptr->sg = NULL;
- }
-
- pinned = ret = 0;
- if (in_kthread) {
- if (!mmget_not_zero(userptr->notifier.mm)) {
- ret = -EFAULT;
- goto mm_closed;
- }
- kthread_use_mm(userptr->notifier.mm);
- }
-
- while (pinned < num_pages) {
- ret = get_user_pages_fast(xe_vma_userptr(vma) +
- pinned * PAGE_SIZE,
- num_pages - pinned,
- read_only ? 0 : FOLL_WRITE,
- &pages[pinned]);
- if (ret < 0)
- break;
-
- pinned += ret;
- ret = 0;
- }
-
- if (in_kthread) {
- kthread_unuse_mm(userptr->notifier.mm);
- mmput(userptr->notifier.mm);
- }
-mm_closed:
- if (ret)
- goto out;
-
- ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
- pinned, 0,
- (u64)pinned << PAGE_SHIFT,
- xe_sg_segment_size(xe->drm.dev),
- GFP_KERNEL);
- if (ret) {
- userptr->sg = NULL;
- goto out;
- }
- userptr->sg = &userptr->sgt;
-
- ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
- read_only ? DMA_TO_DEVICE :
- DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_KERNEL_MAPPING);
- if (ret) {
- sg_free_table(userptr->sg);
- userptr->sg = NULL;
- goto out;
- }
-
- for (i = 0; i < pinned; ++i) {
- if (!read_only) {
- lock_page(pages[i]);
- set_page_dirty(pages[i]);
- unlock_page(pages[i]);
- }
-
- mark_page_accessed(pages[i]);
- }
-out:
- release_pages(pages, pinned);
- kvfree(pages);
-
- if (!(ret < 0)) {
- userptr->notifier_seq = notifier_seq;
- if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
- goto retry;
- }
-
- return ret < 0 ? ret : 0;
+ return xe_hmm_userptr_populate_range(uvma, false);
}
static bool preempt_fences_waiting(struct xe_vm *vm)
@@ -482,17 +384,53 @@ static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
return 0;
}
+/**
+ * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
+ * @vm: The vm for which we are rebinding.
+ * @exec: The struct drm_exec with the locked GEM objects.
+ * @num_fences: The number of fences to reserve for the operation, not
+ * including rebinds and validations.
+ *
+ * Validates all evicted gem objects and rebinds their vmas. Note that
+ * rebindings may cause evictions and hence the validation-rebind
+ * sequence is rerun until there are no more objects to validate.
+ *
+ * Return: 0 on success, negative error code on error. In particular,
+ * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
+ * the drm_exec transaction needs to be restarted.
+ */
+int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ do {
+ ret = drm_gpuvm_validate(&vm->gpuvm, exec);
+ if (ret)
+ return ret;
+
+ ret = xe_vm_rebind(vm, false);
+ if (ret)
+ return ret;
+ } while (!list_empty(&vm->gpuvm.evict.list));
+
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ ret = dma_resv_reserve_fences(obj->resv, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
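/*
 * A sketch of driving the helper above from a drm_exec transaction,
 * retrying on -EDEADLK as the kernel-doc prescribes (lock_and_validate is
 * a hypothetical name; the locking shape mirrors xe_vma_destroy_unlocked()):
 */
static int lock_and_validate(struct xe_vm *vm, unsigned int num_fences)
{
	struct drm_exec exec;
	int err = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;

		/* Validation/rebind may evict and hit contention: retry. */
		err = xe_vm_validate_rebind(vm, &exec, num_fences);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	drm_exec_fini(&exec);

	return err;
}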
static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
bool *done)
{
int err;
- /*
- * 1 fence for each preempt fence plus a fence for each tile from a
- * possible rebind
- */
- err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
- vm->xe->info.tile_count);
+ err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
if (err)
return err;
@@ -507,7 +445,7 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
return 0;
}
- err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
+ err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
if (err)
return err;
@@ -515,14 +453,19 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
if (err)
return err;
- return drm_gpuvm_validate(&vm->gpuvm, exec);
+ /*
+ * Add validation and rebinding to the locking loop since both can
+ * cause evictions which may require blocking dma_resv locks.
+ * The fence reservation here is intended for the new preempt fences
+ * we attach at the end of the rebind work.
+ */
+ return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
}
static void preempt_rebind_work_func(struct work_struct *w)
{
struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
struct drm_exec exec;
- struct dma_fence *rebind_fence;
unsigned int fence_count = 0;
LIST_HEAD(preempt_fences);
ktime_t end = 0;
@@ -568,18 +511,11 @@ retry:
if (err)
goto out_unlock;
- rebind_fence = xe_vm_rebind(vm, true);
- if (IS_ERR(rebind_fence)) {
- err = PTR_ERR(rebind_fence);
+ err = xe_vm_rebind(vm, true);
+ if (err)
goto out_unlock;
- }
-
- if (rebind_fence) {
- dma_fence_wait(rebind_fence, false);
- dma_fence_put(rebind_fence);
- }
- /* Wait on munmap style VM unbinds */
+ /* Wait on rebinds and munmap style VM unbinds */
wait = dma_resv_wait_timeout(xe_vm_resv(vm),
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
@@ -648,6 +584,10 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
if (!mmu_notifier_range_blockable(range))
return false;
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
down_write(&vm->userptr.notifier_lock);
mmu_interval_set_seq(mni, cur_seq);
@@ -773,14 +713,14 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
bool first_op, bool last_op);
-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
{
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
struct xe_vma *vma, *next;
lockdep_assert_held(&vm->lock);
if (xe_vm_in_lr_mode(vm) && !rebind_worker)
- return NULL;
+ return 0;
xe_vm_assert_held(vm);
list_for_each_entry_safe(vma, next, &vm->rebind_list,
@@ -788,17 +728,17 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
xe_assert(vm->xe, vma->tile_present);
list_del_init(&vma->combined_links.rebind);
- dma_fence_put(fence);
if (rebind_worker)
trace_xe_vma_rebind_worker(vma);
else
trace_xe_vma_rebind_exec(vma);
fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
if (IS_ERR(fence))
- return fence;
+ return PTR_ERR(fence);
+ dma_fence_put(fence);
}
- return fence;
+ return 0;
}
static void xe_vma_free(struct xe_vma *vma)
@@ -917,8 +857,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
static void xe_vma_destroy_late(struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
- bool read_only = xe_vma_read_only(vma);
if (vma->ufence) {
xe_sync_ufence_put(vma->ufence);
@@ -926,16 +864,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
}
if (xe_vma_is_userptr(vma)) {
- struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+ struct xe_userptr_vma *uvma = to_userptr_vma(vma);
+ struct xe_userptr *userptr = &uvma->userptr;
- if (userptr->sg) {
- dma_unmap_sgtable(xe->drm.dev,
- userptr->sg,
- read_only ? DMA_TO_DEVICE :
- DMA_BIDIRECTIONAL, 0);
- sg_free_table(userptr->sg);
- userptr->sg = NULL;
- }
+ if (userptr->sg)
+ xe_hmm_userptr_free_sg(uvma);
/*
* Since userptr pages are not pinned, we can't remove
@@ -1004,35 +937,26 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
}
/**
- * xe_vm_prepare_vma() - drm_exec utility to lock a vma
+ * xe_vm_lock_vma() - drm_exec utility to lock a vma
* @exec: The drm_exec object we're currently locking for.
* @vma: The vma for which we want to lock the vm resv and any attached
* object's resv.
- * @num_shared: The number of dma-fence slots to pre-allocate in the
- * objects' reservation objects.
*
* Return: 0 on success, negative error code on error. In particular
* may return -EDEADLK on WW transaction contention and -EINTR if
* an interruptible wait is terminated by a signal.
*/
-int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
- unsigned int num_shared)
+int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_bo *bo = xe_vma_bo(vma);
int err;
XE_WARN_ON(!vm);
- if (num_shared)
- err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
- else
- err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
- if (!err && bo && !bo->vm) {
- if (num_shared)
- err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
- else
- err = drm_exec_lock_obj(exec, &bo->ttm.base);
- }
+
+ err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
+ if (!err && bo && !bo->vm)
+ err = drm_exec_lock_obj(exec, &bo->ttm.base);
return err;
}
@@ -1044,7 +968,7 @@ static void xe_vma_destroy_unlocked(struct xe_vma *vma)
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
- err = xe_vm_prepare_vma(&exec, vma, 0);
+ err = xe_vm_lock_vma(&exec, vma);
drm_exec_retry_on_contention(&exec);
if (XE_WARN_ON(err))
break;
@@ -1249,8 +1173,6 @@ static const struct xe_pt_ops xelp_pt_ops = {
.pde_encode_bo = xelp_pde_encode_bo,
};
-static void vm_destroy_work_func(struct work_struct *w);
-
/**
* xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
* given tile and vm.
@@ -1330,8 +1252,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
- INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
-
INIT_LIST_HEAD(&vm->preempt.exec_queues);
vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
@@ -1341,7 +1261,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->pt_ops = &xelp_pt_ops;
if (!(flags & XE_VM_FLAG_MIGRATION))
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
@@ -1386,9 +1306,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->batch_invalidate_tlb = true;
}
- if (flags & XE_VM_FLAG_LR_MODE) {
+ if (vm->flags & XE_VM_FLAG_LR_MODE) {
INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
- vm->flags |= XE_VM_FLAG_LR_MODE;
vm->batch_invalidate_tlb = false;
}
@@ -1452,7 +1371,7 @@ err_no_resv:
xe_range_fence_tree_fini(&vm->rftree[id]);
kfree(vm);
if (!(flags & XE_VM_FLAG_MIGRATION))
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return ERR_PTR(err);
}
@@ -1552,6 +1471,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe->usm.num_vm_in_fault_mode--;
else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe->usm.num_vm_in_non_fault_mode--;
+
+ if (vm->usm.asid) {
+ void *lookup;
+
+ xe_assert(xe, xe->info.has_asid);
+ xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
+
+ lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
+ xe_assert(xe, lookup == vm);
+ }
mutex_unlock(&xe->usm.lock);
for_each_tile(tile, xe, id)
@@ -1560,47 +1489,31 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vm_put(vm);
}
-static void vm_destroy_work_func(struct work_struct *w)
+static void xe_vm_free(struct drm_gpuvm *gpuvm)
{
- struct xe_vm *vm =
- container_of(w, struct xe_vm, destroy_work);
+ struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
struct xe_device *xe = vm->xe;
struct xe_tile *tile;
u8 id;
- void *lookup;
/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);
- mutex_destroy(&vm->snap_mutex);
+ if (xe_vm_in_preempt_fence_mode(vm))
+ flush_work(&vm->preempt.rebind_work);
- if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
- xe_device_mem_access_put(xe);
+ mutex_destroy(&vm->snap_mutex);
- if (xe->info.has_asid && vm->usm.asid) {
- mutex_lock(&xe->usm.lock);
- lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
- xe_assert(xe, lookup == vm);
- mutex_unlock(&xe->usm.lock);
- }
- }
+ if (!(vm->flags & XE_VM_FLAG_MIGRATION))
+ xe_pm_runtime_put(xe);
for_each_tile(tile, xe, id)
XE_WARN_ON(vm->pt_root[id]);
trace_xe_vm_free(vm);
- dma_fence_put(vm->rebind_fence);
kfree(vm);
}
-static void xe_vm_free(struct drm_gpuvm *gpuvm)
-{
- struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
-
- /* To destroy the VM we need to be able to sleep */
- queue_work(system_unbound_wq, &vm->destroy_work);
-}
-
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
{
struct xe_vm *vm;
@@ -1697,7 +1610,7 @@ next:
xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
if (last_op) {
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
}
return fence;
@@ -1771,7 +1684,7 @@ next:
if (last_op) {
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL,
+ xe_sync_entry_signal(&syncs[i],
cf ? &cf->base : fence);
}
@@ -1832,7 +1745,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
if (last_op) {
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
}
}
@@ -2033,7 +1946,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
int err;
- xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
+ xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
if (!xe_vma_has_no_bo(vma)) {
err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
@@ -2053,7 +1966,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
struct dma_fence *fence =
xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
dma_fence_put(fence);
}
}
@@ -2185,6 +2098,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
if (__op->op == DRM_GPUVA_OP_MAP) {
+ op->map.immediate =
+ flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
+ op->map.read_only =
+ flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
@@ -2379,6 +2296,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
+ flags |= op->map.read_only ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
flags |= op->map.dumpable ?
@@ -2512,7 +2431,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
lockdep_assert_held_write(&vm->lock);
- err = xe_vm_prepare_vma(exec, vma, 1);
+ err = xe_vm_lock_vma(exec, vma);
if (err)
return err;
@@ -2523,7 +2442,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
op->syncs, op->num_syncs,
- !xe_vm_in_fault_mode(vm),
+ op->map.immediate || !xe_vm_in_fault_mode(vm),
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
@@ -2798,7 +2717,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
return 0;
}
-#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
+#define SUPPORTED_FLAGS \
+ (DRM_XE_VM_BIND_FLAG_READONLY | \
+ DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
+ DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
@@ -2931,7 +2853,7 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
return PTR_ERR(fence);
for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_sync_entry_signal(&syncs[i], fence);
xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
fence);
@@ -3042,7 +2964,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto put_obj;
}
- if (bos[i]->flags & XE_BO_INTERNAL_64K) {
+ if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
if (XE_IOCTL_DBG(xe, obj_offset &
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
@@ -3234,6 +3156,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
xe_assert(xe, !xe_vma_is_null(vma));
trace_xe_vma_invalidate(vma);
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "INVALIDATE: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
/* Check that we don't race with page-table updates */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
@@ -3352,8 +3278,10 @@ struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
if (num_snaps)
snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
- if (!snap)
+ if (!snap) {
+ snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
goto out_unlock;
+ }
snap->num_snaps = num_snaps;
i = 0;
@@ -3393,6 +3321,9 @@ out_unlock:
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
{
+ if (IS_ERR_OR_NULL(snap))
+ return;
+
for (int i = 0; i < snap->num_snaps; i++) {
struct xe_bo *bo = snap->snap[i].bo;
struct iosys_map src;
@@ -3447,13 +3378,21 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
{
unsigned long i, j;
- for (i = 0; i < snap->num_snaps; i++) {
- if (IS_ERR(snap->snap[i].data))
- goto uncaptured;
+ if (IS_ERR_OR_NULL(snap)) {
+ drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
+ return;
+ }
+ for (i = 0; i < snap->num_snaps; i++) {
drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
- drm_printf(p, "[%llx].data: ",
- snap->snap[i].ofs);
+
+ if (IS_ERR(snap->snap[i].data)) {
+ drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
+ PTR_ERR(snap->snap[i].data));
+ continue;
+ }
+
+ drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
u32 *val = snap->snap[i].data + j;
@@ -3463,12 +3402,6 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
}
drm_puts(p, "\n");
- continue;
-
-uncaptured:
- drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n",
- snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1,
- PTR_ERR(snap->snap[i].data));
}
}
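/*
 * Illustrative dump produced by the reworked printer for one failed and
 * one captured range (addresses and values invented):
 *
 *	[0].error: -12
 *	[10000].length: 0x40
 *	[10000].data: <hex words>
 */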
@@ -3476,7 +3409,7 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
{
unsigned long i;
- if (!snap)
+ if (IS_ERR_OR_NULL(snap))
return;
for (i = 0; i < snap->num_snaps; i++) {
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 6df1f1c7f85d..306cd0934a19 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -207,7 +207,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
int xe_vm_userptr_check_repin(struct xe_vm *vm);
-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
int xe_vm_invalidate_vma(struct xe_vma *vma);
@@ -242,8 +242,10 @@ bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
-int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
- unsigned int num_shared);
+int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
+
+int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
/**
* xe_vm_resv() - Return's the vm's reservation object
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ae5fb565f6bf..72a100671e5d 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -177,16 +177,6 @@ struct xe_vm {
*/
struct list_head rebind_list;
- /** @rebind_fence: rebind fence from execbuf */
- struct dma_fence *rebind_fence;
-
- /**
- * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
- * from an irq context can be last put and the destroy needs to be able
- * to sleep.
- */
- struct work_struct destroy_work;
-
/**
* @rftree: range fence tree to track updates to page table structure.
* Used to implement conflict tracking between independent bind engines.
@@ -264,6 +254,11 @@ struct xe_vm {
bool capture_once;
} error_capture;
+ /**
+ * @tlb_flush_seqno: Required TLB flush seqno for the next exec,
+ * protected by the vm resv.
+ */
+ u64 tlb_flush_seqno;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
/** @xef: XE file handle for tracking this VM's drm client */
@@ -274,6 +269,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
+ /** @immediate: Immediate bind */
+ bool immediate;
+ /** @read_only: Read only */
+ bool read_only;
/** @is_null: is NULL binding */
bool is_null;
/** @dumpable: whether BO is dumped on GPU hang */
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index c5f6b5a5d117..3e21ddc6e60c 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -100,31 +100,27 @@ static void vram_freq_sysfs_fini(struct drm_device *drm, void *arg)
* @tile: Xe Tile object
*
* It needs to be initialized after the main tile component is ready
+ *
+ * Returns: 0 on success, negative error code on error.
*/
-void xe_vram_freq_sysfs_init(struct xe_tile *tile)
+int xe_vram_freq_sysfs_init(struct xe_tile *tile)
{
struct xe_device *xe = tile_to_xe(tile);
struct kobject *kobj;
int err;
if (xe->info.platform != XE_PVC)
- return;
+ return 0;
kobj = kobject_create_and_add("memory", tile->sysfs);
- if (!kobj) {
- drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
- return;
- }
+ if (!kobj)
+ return -ENOMEM;
err = sysfs_create_group(kobj, &freq_group_attrs);
if (err) {
kobject_put(kobj);
- drm_warn(&xe->drm, "failed to register vram freq sysfs, err: %d\n", err);
- return;
+ return err;
}
- err = drmm_add_action_or_reset(&xe->drm, vram_freq_sysfs_fini, kobj);
- if (err)
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
+ return drmm_add_action_or_reset(&xe->drm, vram_freq_sysfs_fini, kobj);
}
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.h b/drivers/gpu/drm/xe/xe_vram_freq.h
index cbe8c12fbd64..bf726bc5881f 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.h
+++ b/drivers/gpu/drm/xe/xe_vram_freq.h
@@ -8,6 +8,6 @@
struct xe_tile;
-void xe_vram_freq_sysfs_init(struct xe_tile *tile);
+int xe_vram_freq_sysfs_init(struct xe_tile *tile);
#endif /* _XE_VRAM_FREQ_H_ */
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index a0264eedd443..dd214d95e4b6 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -173,11 +173,11 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(CLR(MISCCPCTL, DOP_CLOCK_GATE_RENDER_ENABLE))
},
{ XE_RTP_NAME("14018575942"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274)),
XE_RTP_ACTIONS(SET(COMP_MOD_CTRL, FORCE_MISS_FTLB))
},
{ XE_RTP_NAME("22016670082"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274)),
XE_RTP_ACTIONS(SET(SQCNT1, ENFORCE_RAR))
},
@@ -228,6 +228,28 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ /* Xe2_HPM */
+
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(1301),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("14020316580"),
+ XE_RTP_RULES(MEDIA_VERSION(1301)),
+ XE_RTP_ACTIONS(CLR(PG_ENABLE,
+ VD0_HCP_POWERGATE_ENABLE |
+ VD0_MFXVDENC_POWERGATE_ENABLE |
+ VD2_HCP_POWERGATE_ENABLE |
+ VD2_MFXVDENC_POWERGATE_ENABLE)),
+ },
+ { XE_RTP_NAME("14019449301"),
+ XE_RTP_RULES(MEDIA_VERSION(1301), ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F08(0), CG3DDISHRS_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+
{}
};
@@ -328,12 +350,6 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE))
},
- { XE_RTP_NAME("16015675438"),
- XE_RTP_RULES(PLATFORM(DG2),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(FF_SLICE_CS_CHICKEN2(RENDER_RING_BASE),
- PERF_FIX_BALANCING_CFE_DISABLE))
- },
{ XE_RTP_NAME("18028616096"),
XE_RTP_RULES(PLATFORM(DG2),
FUNC(xe_rtp_match_first_render_or_compute)),
@@ -383,10 +399,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(PLATFORM(PVC), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE))
},
- { XE_RTP_NAME("16015675438"),
- XE_RTP_RULES(PLATFORM(PVC), FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(FF_SLICE_CS_CHICKEN2(RENDER_RING_BASE),
- PERF_FIX_BALANCING_CFE_DISABLE))
+ { XE_RTP_NAME("18020744125"),
+ XE_RTP_RULES(PLATFORM(PVC), FUNC(xe_rtp_match_first_render_or_compute),
+ ENGINE_CLASS(COMPUTE)),
+ XE_RTP_ACTIONS(SET(RING_HWSTAM(RENDER_RING_BASE), ~0))
},
{ XE_RTP_NAME("14014999345"),
XE_RTP_RULES(PLATFORM(PVC), ENGINE_CLASS(COMPUTE),
@@ -397,7 +413,7 @@ static const struct xe_rtp_entry_sr engine_was[] = {
/* Xe_LPG */
{ XE_RTP_NAME("14017856879"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN3, DIS_FIX_EOT1_FLUSH))
},
@@ -407,6 +423,11 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(XEHP_HDC_CHICKEN0, DIS_ATOMIC_CHAINING_TYPED_WRITES,
XE_RTP_NOCHECK))
},
+ { XE_RTP_NAME("14020495402"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN2, DISABLE_TDL_SVHS_GATING))
+ },
/* Xe2_LPG */
@@ -424,8 +445,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN5, DISABLE_SAMPLE_G_PERFORMANCE))
},
- { XE_RTP_NAME("16021540221"),
- XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ { XE_RTP_NAME("14020338487"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN3, XE2_EUPEND_CHK_FLUSH_DIS))
+ },
+ { XE_RTP_NAME("18034896535, 16021540221"), /* 16021540221: GRAPHICS_STEP(A0, B0) */
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH))
},
@@ -460,6 +485,65 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE))
},
+
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("16018712365"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, XE2_ALLOC_DPA_STARVE_FIX_DIS))
+ },
+ { XE_RTP_NAME("16018737384"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
+ },
+ { XE_RTP_NAME("14019988906"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
+ },
+ { XE_RTP_NAME("14019877138"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
+ },
+ { XE_RTP_NAME("14020338487"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN3, XE2_EUPEND_CHK_FLUSH_DIS))
+ },
+ { XE_RTP_NAME("18032247524"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, SEQUENTIAL_ACCESS_UPGRADE_DISABLE))
+ },
+ { XE_RTP_NAME("14018471104"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
+ },
+ /*
+ * Although this workaround isn't required for the RCS, disabling these
+ * reports has no impact on our driver or the GuC, so we go ahead and
+ * apply this to all engines for simplicity.
+ */
+ { XE_RTP_NAME("16021639441"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
+ GHWSP_CSB_REPORT_DIS |
+ PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+ { XE_RTP_NAME("14019811474"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, WR_REQ_CHAINING_DIS))
+ },
+
+ /* Xe2_HPM */
+
+ { XE_RTP_NAME("16021639441"),
+ XE_RTP_RULES(MEDIA_VERSION(1301)),
+ XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
+ GHWSP_CSB_REPORT_DIS |
+ PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+
{}
};
@@ -537,7 +621,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
/* Xe_LPG */
{ XE_RTP_NAME("18019271663"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1274)),
XE_RTP_ACTIONS(SET(CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE))
},
{ XE_RTP_NAME("14019877138"),
@@ -580,6 +664,24 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT))
},
+ { XE_RTP_NAME("18033852989"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
+ },
+
+ /* Xe2_HPG */
+ { XE_RTP_NAME("15010599737"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN))
+ },
+ { XE_RTP_NAME("14019386621"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(VF_SCRATCHPAD, XE2_VFG_TED_CREDIT_INTERFACE_DISABLE))
+ },
+ { XE_RTP_NAME("14020756599"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(WM_CHICKEN3, HIZ_PLANE_COMPRESSION_DIS))
+ },
{}
};
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index b138cbd51bdb..12fe88796a49 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -4,9 +4,6 @@
22011391025 PLATFORM(DG2)
22012727170 SUBPLATFORM(DG2, G11)
22012727685 SUBPLATFORM(DG2, G11)
-16015675438 PLATFORM(PVC)
- SUBPLATFORM(DG2, G10)
- SUBPLATFORM(DG2, G12)
18020744125 PLATFORM(PVC)
1509372804 PLATFORM(PVC), GRAPHICS_STEP(A0, C0)
1409600907 GRAPHICS_VERSION_RANGE(1200, 1250)
@@ -22,3 +19,11 @@
GRAPHICS_VERSION_RANGE(1270, 1274)
MEDIA_VERSION(1300)
PLATFORM(DG2)
+14018094691 GRAPHICS_VERSION(2004)
+14019882105 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
+18024947630 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(2004)
+ MEDIA_VERSION(2000)
+16022287689 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(2004)
+13011645652 GRAPHICS_VERSION(2004)
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index 68ee897de9d7..41d753b14ccd 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -1,13 +1,15 @@
config DRM_ZYNQMP_DPSUB
tristate "ZynqMP DisplayPort Controller Driver"
depends on ARCH_ZYNQMP || COMPILE_TEST
- depends on COMMON_CLK && DRM && OF
+ depends on COMMON_CLK
depends on DMADEVICES
+ depends on DRM
+ depends on DRM_DISPLAY_DP_HELPER
+ depends on DRM_DISPLAY_HELPER
+ depends on OF
depends on PHY_XILINX_ZYNQMP
depends on XILINX_ZYNQMP_DPDMA
select DMA_ENGINE
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select GENERIC_PHY
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 8a39b3accce5..13157da0089e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -18,6 +18,7 @@
#include <linux/dma/xilinx_dpdma.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -65,14 +66,26 @@
#define ZYNQMP_DISP_MAX_NUM_SUB_PLANES 3
/**
+ * enum zynqmp_dpsub_layer_mode - Layer mode
+ * @ZYNQMP_DPSUB_LAYER_NONLIVE: non-live (memory) mode
+ * @ZYNQMP_DPSUB_LAYER_LIVE: live (stream) mode
+ */
+enum zynqmp_dpsub_layer_mode {
+ ZYNQMP_DPSUB_LAYER_NONLIVE,
+ ZYNQMP_DPSUB_LAYER_LIVE,
+};
+
+/**
* struct zynqmp_disp_format - Display subsystem format information
* @drm_fmt: DRM format (4CC)
+ * @bus_fmt: Media bus format
* @buf_fmt: AV buffer format
* @swap: Flag to swap R & B for RGB formats, and U & V for YUV formats
* @sf: Scaling factors for color components
*/
struct zynqmp_disp_format {
u32 drm_fmt;
+ u32 bus_fmt;
u32 buf_fmt;
bool swap;
const u32 *sf;
@@ -172,6 +185,12 @@ static const u32 scaling_factors_565[] = {
ZYNQMP_DISP_AV_BUF_5BIT_SF,
};
+static const u32 scaling_factors_666[] = {
+ ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ ZYNQMP_DISP_AV_BUF_6BIT_SF,
+};
+
static const u32 scaling_factors_888[] = {
ZYNQMP_DISP_AV_BUF_8BIT_SF,
ZYNQMP_DISP_AV_BUF_8BIT_SF,
@@ -354,6 +373,41 @@ static const struct zynqmp_disp_format avbuf_gfx_fmts[] = {
},
};
+/* List of live video layer formats */
+static const struct zynqmp_disp_format avbuf_live_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_RGB565,
+ .bus_fmt = MEDIA_BUS_FMT_RGB666_1X18,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB,
+ .sf = scaling_factors_666,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .bus_fmt = MEDIA_BUS_FMT_RGB888_1X24,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB,
+ .sf = scaling_factors_888,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .bus_fmt = MEDIA_BUS_FMT_UYVY8_1X16,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422,
+ .sf = scaling_factors_888,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .bus_fmt = MEDIA_BUS_FMT_VUY8_1X24,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444,
+ .sf = scaling_factors_888,
+ }, {
+ .drm_fmt = DRM_FORMAT_P210,
+ .bus_fmt = MEDIA_BUS_FMT_UYVY10_1X20,
+ .buf_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422,
+ .sf = scaling_factors_101010,
+ },
+};
+
static u32 zynqmp_disp_avbuf_read(struct zynqmp_disp *disp, int reg)
{
return readl(disp->avbuf.base + reg);
@@ -382,19 +436,29 @@ static void zynqmp_disp_avbuf_set_format(struct zynqmp_disp *disp,
const struct zynqmp_disp_format *fmt)
{
unsigned int i;
- u32 val;
-
- val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_FMT);
- val &= zynqmp_disp_layer_is_video(layer)
- ? ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK
- : ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
- val |= fmt->buf_fmt;
- zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_FMT, val);
+ u32 val, reg;
+
+ layer->disp_fmt = fmt;
+ if (layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE) {
+ reg = ZYNQMP_DISP_AV_BUF_FMT;
+ val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_FMT);
+ val &= zynqmp_disp_layer_is_video(layer)
+ ? ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK
+ : ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
+ val |= fmt->buf_fmt;
+ zynqmp_disp_avbuf_write(disp, reg, val);
+ } else {
+ reg = zynqmp_disp_layer_is_video(layer)
+ ? ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG
+ : ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG;
+ val = fmt->buf_fmt;
+ zynqmp_disp_avbuf_write(disp, reg, val);
+ }
for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++) {
- unsigned int reg = zynqmp_disp_layer_is_video(layer)
- ? ZYNQMP_DISP_AV_BUF_VID_COMP_SF(i)
- : ZYNQMP_DISP_AV_BUF_GFX_COMP_SF(i);
+ reg = zynqmp_disp_layer_is_video(layer)
+ ? ZYNQMP_DISP_AV_BUF_VID_COMP_SF(i)
+ : ZYNQMP_DISP_AV_BUF_GFX_COMP_SF(i);
zynqmp_disp_avbuf_write(disp, reg, fmt->sf[i]);
}
@@ -873,10 +937,40 @@ zynqmp_disp_layer_find_format(struct zynqmp_disp_layer *layer,
}
/**
+ * zynqmp_disp_layer_find_live_format - Find format information for given
+ * media bus format
+ * @layer: The layer
+ * @media_bus_format: Media bus format to search
+ *
+ * Search display subsystem format information corresponding to the given media
+ * bus format @media_bus_format for the @layer, and return a pointer to the
+ * format descriptor.
+ *
+ * Return: A pointer to the format descriptor if found, NULL otherwise
+ */
+static const struct zynqmp_disp_format *
+zynqmp_disp_layer_find_live_format(struct zynqmp_disp_layer *layer,
+ u32 media_bus_format)
+{
+ unsigned int i;
+
+ for (i = 0; i < layer->info->num_formats; i++)
+ if (layer->info->formats[i].bus_fmt == media_bus_format)
+ return &layer->info->formats[i];
+
+ return NULL;
+}
+
+/**
* zynqmp_disp_layer_drm_formats - Return the DRM formats supported by the layer
* @layer: The layer
* @num_formats: Pointer to the returned number of formats
*
+ * NOTE: This function doesn't make sense for live video layers and will
+ * return NULL with *@num_formats set to 0 in such cases.
+ * zynqmp_disp_live_layer_formats() should be used to query the media bus
+ * formats supported by the live video input layer.
+ *
* Return: A newly allocated u32 array that stores all the DRM formats
* supported by the layer. The number of formats in the array is returned
* through the num_formats argument.
@@ -887,10 +981,17 @@ u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
unsigned int i;
u32 *formats;
+ if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_NONLIVE)) {
+ *num_formats = 0;
+ return NULL;
+ }
+
formats = kcalloc(layer->info->num_formats, sizeof(*formats),
GFP_KERNEL);
- if (!formats)
+ if (!formats) {
+ *num_formats = 0;
return NULL;
+ }
for (i = 0; i < layer->info->num_formats; ++i)
formats[i] = layer->info->formats[i].drm_fmt;
@@ -900,17 +1001,51 @@ u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
}
/**
+ * zynqmp_disp_live_layer_formats - Return the media bus formats supported by
+ * the live video layer
+ * @layer: The layer
+ * @num_formats: Pointer to the returned number of formats
+ *
+ * NOTE: This function should be used only for live video input layers.
+ *
+ * Return: A newly allocated u32 array of media bus formats supported by the
+ * layer. The number of formats in the array is returned through the
+ * @num_formats argument.
+ */
+u32 *zynqmp_disp_live_layer_formats(struct zynqmp_disp_layer *layer,
+ unsigned int *num_formats)
+{
+ unsigned int i;
+ u32 *formats;
+
+ if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_LIVE)) {
+ *num_formats = 0;
+ return NULL;
+ }
+
+ formats = kcalloc(layer->info->num_formats, sizeof(*formats),
+ GFP_KERNEL);
+ if (!formats) {
+ *num_formats = 0;
+ return NULL;
+ }
+
+ for (i = 0; i < layer->info->num_formats; ++i)
+ formats[i] = layer->info->formats[i].bus_fmt;
+
+ *num_formats = layer->info->num_formats;
+ return formats;
+}
+
+/**
* zynqmp_disp_layer_enable - Enable a layer
* @layer: The layer
- * @mode: Operating mode of layer
*
* Enable the @layer in the audio/video buffer manager and the blender. DMA
* channels are started separately by zynqmp_disp_layer_update().
*/
-void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer,
- enum zynqmp_dpsub_layer_mode mode)
+void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer)
{
- layer->mode = mode;
zynqmp_disp_avbuf_enable_video(layer->disp, layer);
zynqmp_disp_blend_layer_enable(layer->disp, layer);
}
@@ -926,7 +1061,7 @@ void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer)
{
unsigned int i;
- if (layer->disp->dpsub->dma_enabled) {
+ if (layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE) {
for (i = 0; i < layer->drm_fmt->num_planes; i++)
dmaengine_terminate_sync(layer->dmas[i].chan);
}
@@ -940,6 +1075,9 @@ void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer)
* @layer: The layer
* @info: The format info
*
+ * NOTE: Use zynqmp_disp_layer_set_live_format() to set the media bus format
+ * for live video layers.
+ *
* Set the format for @layer to @info. The layer must be disabled.
*/
void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
@@ -947,14 +1085,16 @@ void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
{
unsigned int i;
+ if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_NONLIVE))
+ return;
+
layer->disp_fmt = zynqmp_disp_layer_find_format(layer, info->format);
+ if (WARN_ON(!layer->disp_fmt))
+ return;
layer->drm_fmt = info;
zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt);
- if (!layer->disp->dpsub->dma_enabled)
- return;
-
/*
* Set pconfig for each DMA channel to indicate they're part of a
* video group.
@@ -975,6 +1115,32 @@ void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
}
/**
+ * zynqmp_disp_layer_set_live_format - Set the live video layer format
+ * @layer: The layer
+ * @media_bus_format: Media bus format to set
+ *
+ * NOTE: This function should not be used to set the format for non-live video
+ * layers. Use zynqmp_disp_layer_set_format() instead.
+ *
+ * Set the display format for the live @layer. The layer must be disabled.
+ */
+void zynqmp_disp_layer_set_live_format(struct zynqmp_disp_layer *layer,
+ u32 media_bus_format)
+{
+ if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_LIVE))
+ return;
+
+ layer->disp_fmt = zynqmp_disp_layer_find_live_format(layer,
+ media_bus_format);
+ if (WARN_ON(!layer->disp_fmt))
+ return;
+
+ zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt);
+
+ layer->drm_fmt = drm_format_info(layer->disp_fmt->drm_fmt);
+}
+
+/**
* zynqmp_disp_layer_update - Update the layer framebuffer
* @layer: The layer
* @state: The plane state
@@ -990,7 +1156,7 @@ int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
const struct drm_format_info *info = layer->drm_fmt;
unsigned int i;
- if (!layer->disp->dpsub->dma_enabled)
+ if (layer->mode == ZYNQMP_DPSUB_LAYER_LIVE)
return 0;
for (i = 0; i < info->num_planes; i++) {
@@ -1040,9 +1206,6 @@ static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
{
unsigned int i;
- if (!layer->info || !disp->dpsub->dma_enabled)
- return;
-
for (i = 0; i < layer->info->num_channels; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
@@ -1083,9 +1246,6 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
unsigned int i;
int ret;
- if (!disp->dpsub->dma_enabled)
- return 0;
-
for (i = 0; i < layer->info->num_channels; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
char dma_channel_name[16];
@@ -1124,6 +1284,11 @@ static int zynqmp_disp_create_layers(struct zynqmp_disp *disp)
.num_channels = 1,
},
};
+ static const struct zynqmp_disp_layer_info live_layer_info = {
+ .formats = avbuf_live_fmts,
+ .num_formats = ARRAY_SIZE(avbuf_live_fmts),
+ .num_channels = 0,
+ };
unsigned int i;
int ret;
@@ -1133,7 +1298,17 @@ static int zynqmp_disp_create_layers(struct zynqmp_disp *disp)
layer->id = i;
layer->disp = disp;
- layer->info = &layer_info[i];
+ /*
+ * For now assume dpsub works in either live or non-live mode for both layers.
+ * Hybrid mode is not supported yet.
+ */
+ if (disp->dpsub->dma_enabled) {
+ layer->mode = ZYNQMP_DPSUB_LAYER_NONLIVE;
+ layer->info = &layer_info[i];
+ } else {
+ layer->mode = ZYNQMP_DPSUB_LAYER_LIVE;
+ layer->info = &live_layer_info;
+ }
ret = zynqmp_disp_layer_request_dma(disp, layer);
if (ret)
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.h b/drivers/gpu/drm/xlnx/zynqmp_disp.h
index 123cffac08be..fa545533c9d1 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.h
@@ -42,16 +42,6 @@ enum zynqmp_dpsub_layer_id {
ZYNQMP_DPSUB_LAYER_GFX,
};
-/**
- * enum zynqmp_dpsub_layer_mode - Layer mode
- * @ZYNQMP_DPSUB_LAYER_NONLIVE: non-live (memory) mode
- * @ZYNQMP_DPSUB_LAYER_LIVE: live (stream) mode
- */
-enum zynqmp_dpsub_layer_mode {
- ZYNQMP_DPSUB_LAYER_NONLIVE,
- ZYNQMP_DPSUB_LAYER_LIVE,
-};
-
void zynqmp_disp_enable(struct zynqmp_disp *disp);
void zynqmp_disp_disable(struct zynqmp_disp *disp);
int zynqmp_disp_setup_clock(struct zynqmp_disp *disp,
@@ -62,11 +52,14 @@ void zynqmp_disp_blend_set_global_alpha(struct zynqmp_disp *disp,
u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
unsigned int *num_formats);
-void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer,
- enum zynqmp_dpsub_layer_mode mode);
+u32 *zynqmp_disp_live_layer_formats(struct zynqmp_disp_layer *layer,
+ unsigned int *num_formats);
+void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer);
void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer);
void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
const struct drm_format_info *info);
+void zynqmp_disp_layer_set_live_format(struct zynqmp_disp_layer *layer,
+ u32 media_bus_format);
int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
struct drm_plane_state *state);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
index f92a006d5070..fa3935384834 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
@@ -165,10 +165,10 @@
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 0x2
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_12 0x3
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_MASK GENMASK(2, 0)
-#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB 0x0
-#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444 0x1
-#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 0x2
-#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YONLY 0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB (0x0 << 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444 (0x1 << 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 (0x2 << 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YONLY (0x3 << 4)
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_MASK GENMASK(5, 4)
#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_CB_FIRST BIT(8)
#define ZYNQMP_DISP_AV_BUF_PALETTE_MEMORY 0x400
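The shift fix above moves the FMT values into the bits 5:4 field that their mask describes; a compile-time check of that invariant (illustrative, not part of this patch):

static_assert((ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YONLY &
	       ~ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_MASK) == 0,
	      "live FMT values must fit within GENMASK(5, 4)");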
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 1846c4971fd8..8c2d24809014 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -1276,28 +1277,45 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
* DISP Configuration
*/
+/**
+ * zynqmp_dp_disp_connected_live_layer - Return the first connected live layer
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: The first connected live display layer or NULL if none of the live
+ * layers are connected.
+ */
+static struct zynqmp_disp_layer *
+zynqmp_dp_disp_connected_live_layer(struct zynqmp_dp *dp)
+{
+ if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
+ return dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_VID];
+ else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
+ return dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX];
+ else
+ return NULL;
+}
+
static void zynqmp_dp_disp_enable(struct zynqmp_dp *dp,
struct drm_bridge_state *old_bridge_state)
{
- enum zynqmp_dpsub_layer_id layer_id;
struct zynqmp_disp_layer *layer;
- const struct drm_format_info *info;
+ struct drm_bridge_state *bridge_state;
+ u32 bus_fmt;
- if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
- layer_id = ZYNQMP_DPSUB_LAYER_VID;
- else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
- layer_id = ZYNQMP_DPSUB_LAYER_GFX;
- else
+ layer = zynqmp_dp_disp_connected_live_layer(dp);
+ if (!layer)
return;
- layer = dp->dpsub->layers[layer_id];
+ bridge_state = drm_atomic_get_new_bridge_state(old_bridge_state->base.state,
+ old_bridge_state->bridge);
+ if (WARN_ON(!bridge_state))
+ return;
- /* TODO: Make the format configurable. */
- info = drm_format_info(DRM_FORMAT_YUV422);
- zynqmp_disp_layer_set_format(layer, info);
- zynqmp_disp_layer_enable(layer, ZYNQMP_DPSUB_LAYER_LIVE);
+ bus_fmt = bridge_state->input_bus_cfg.format;
+ zynqmp_disp_layer_set_live_format(layer, bus_fmt);
+ zynqmp_disp_layer_enable(layer);
- if (layer_id == ZYNQMP_DPSUB_LAYER_GFX)
+ if (layer == dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX])
zynqmp_disp_blend_set_global_alpha(dp->dpsub->disp, true, 255);
else
zynqmp_disp_blend_set_global_alpha(dp->dpsub->disp, false, 0);
@@ -1310,11 +1328,8 @@ static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp,
{
struct zynqmp_disp_layer *layer;
- if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
- layer = dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_VID];
- else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
- layer = dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX];
- else
+ layer = zynqmp_dp_disp_connected_live_layer(dp);
+ if (!layer)
return;
zynqmp_disp_disable(dp->dpsub->disp);
@@ -1568,6 +1583,35 @@ static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *brid
return drm_edid_read_ddc(connector, &dp->aux.ddc);
}
+static u32 *zynqmp_dp_bridge_default_bus_fmts(unsigned int *num_input_fmts)
+{
+ u32 *formats = kzalloc(sizeof(*formats), GFP_KERNEL);
+
+ if (formats)
+ *formats = MEDIA_BUS_FMT_FIXED;
+ *num_input_fmts = !!formats;
+
+ return formats;
+}
+
+static u32 *
+zynqmp_dp_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
+ struct zynqmp_disp_layer *layer;
+
+ layer = zynqmp_dp_disp_connected_live_layer(dp);
+ if (layer)
+ return zynqmp_disp_live_layer_formats(layer, num_input_fmts);
+ else
+ return zynqmp_dp_bridge_default_bus_fmts(num_input_fmts);
+}
+
static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
.attach = zynqmp_dp_bridge_attach,
.detach = zynqmp_dp_bridge_detach,
@@ -1580,6 +1624,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
.atomic_check = zynqmp_dp_bridge_atomic_check,
.detect = zynqmp_dp_bridge_detect,
.edid_read = zynqmp_dp_bridge_edid_read,
+ .atomic_get_input_bus_fmts = zynqmp_dp_bridge_get_input_bus_fmts,
};
/* -----------------------------------------------------------------------------
@@ -1714,6 +1759,10 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
goto err_free;
}
+ ret = zynqmp_dp_reset(dp, true);
+ if (ret < 0)
+ goto err_free;
+
ret = zynqmp_dp_reset(dp, false);
if (ret < 0)
goto err_free;
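
[Editor's note] The new .atomic_get_input_bus_fmts hook above follows the standard drm_bridge negotiation contract: return a heap-allocated array of supported input bus formats and report its length through *num_input_fmts (the core frees the array). A minimal sketch of that contract, assuming a hypothetical bridge that accepts exactly one format; names here are illustrative, not from the driver:

#include <linux/slab.h>
#include <linux/media-bus-format.h>
#include <drm/drm_bridge.h>

static u32 *example_get_input_bus_fmts(struct drm_bridge *bridge,
                                       struct drm_bridge_state *bridge_state,
                                       struct drm_crtc_state *crtc_state,
                                       struct drm_connector_state *conn_state,
                                       u32 output_fmt,
                                       unsigned int *num_input_fmts)
{
        /* The caller kfree()s this array, so it must be heap-allocated. */
        u32 *fmts = kmalloc(sizeof(*fmts), GFP_KERNEL);

        if (!fmts) {
                *num_input_fmts = 0;
                return NULL;
        }

        fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;    /* the one supported format */
        *num_input_fmts = 1;
        return fmts;
}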
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index db3bb4afbfc4..43bf416b33d5 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -122,7 +122,7 @@ static void zynqmp_dpsub_plane_atomic_update(struct drm_plane *plane,
/* Enable or re-enable the plane if the format has changed. */
if (format_changed)
- zynqmp_disp_layer_enable(layer, ZYNQMP_DPSUB_LAYER_NONLIVE);
+ zynqmp_disp_layer_enable(layer);
}
static const struct drm_plane_helper_funcs zynqmp_dpsub_plane_helper_funcs = {
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 783975d1384f..7c52757a89db 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -351,11 +351,6 @@ static int host1x_device_uevent(const struct device *dev,
return 0;
}
-static int host1x_dma_configure(struct device *dev)
-{
- return of_dma_configure(dev, dev->of_node, true);
-}
-
static const struct dev_pm_ops host1x_device_pm_ops = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
@@ -369,7 +364,6 @@ const struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.uevent = host1x_device_uevent,
- .dma_configure = host1x_dma_configure,
.pm = &host1x_device_pm_ops,
};
@@ -458,8 +452,6 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
- of_dma_configure(&device->dev, host1x->dev->of_node, true);
-
device->dev.dma_parms = &device->dma_parms;
dma_set_max_seg_size(&device->dev, UINT_MAX);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index e6a8b6d8eab7..3c3c497b6b91 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
}
break;
case REPORT_TYPE_MOUSE:
- workitem->reports_supported |= STD_MOUSE | HIDPP;
- if (djrcv_dev->type == recvr_type_mouse_only)
- workitem->reports_supported |= MULTIMEDIA;
+ workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
break;
}
}
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index f9cceaeffd08..da5ea5a23b08 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -944,9 +944,11 @@ static void mcp2221_hid_unregister(void *ptr)
/* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
static void mcp2221_remove(struct hid_device *hdev)
{
+#if IS_REACHABLE(CONFIG_IIO)
struct mcp2221 *mcp = hid_get_drvdata(hdev);
cancel_delayed_work_sync(&mcp->init_work);
+#endif
}
#if IS_REACHABLE(CONFIG_IIO)
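
[Editor's note] The fix above wraps the remove-path access in the same IS_REACHABLE(CONFIG_IIO) guard as the code that initializes it, so remove() never touches state that was never set up. A sketch of the pattern with hypothetical names:

#include <linux/kconfig.h>
#include <linux/workqueue.h>

struct example_dev {
#if IS_REACHABLE(CONFIG_IIO)
        struct delayed_work init_work;  /* only exists when IIO is reachable */
#endif
};

static void example_remove(struct example_dev *edev)
{
#if IS_REACHABLE(CONFIG_IIO)
        /* Mirrors the init path: cancel only what was ever scheduled. */
        cancel_delayed_work_sync(&edev->init_work);
#endif
}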
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index ab5953fc2436..80e0f23c1c33 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -481,10 +481,10 @@ static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
{ BTN_TR, JC_BTN_R, },
{ BTN_TR2, JC_BTN_LSTICK, }, /* ZR */
{ BTN_START, JC_BTN_PLUS, },
- { BTN_FORWARD, JC_BTN_Y, }, /* C UP */
- { BTN_BACK, JC_BTN_ZR, }, /* C DOWN */
- { BTN_LEFT, JC_BTN_X, }, /* C LEFT */
- { BTN_RIGHT, JC_BTN_MINUS, }, /* C RIGHT */
+ { BTN_SELECT, JC_BTN_Y, }, /* C UP */
+ { BTN_X, JC_BTN_ZR, }, /* C DOWN */
+ { BTN_Y, JC_BTN_X, }, /* C LEFT */
+ { BTN_C, JC_BTN_MINUS, }, /* C RIGHT */
{ BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, },
{ /* sentinel */ },
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 2df1ab3c31cc..d965382196c6 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -64,7 +64,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
-#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@@ -190,15 +189,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
msgs[n].len = recv_len;
msgs[n].buf = recv_buf;
n++;
-
- set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
ret = i2c_transfer(client->adapter, msgs, n);
- if (recv_len)
- clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
-
if (ret != n)
return ret < 0 ? ret : -EIO;
@@ -556,9 +550,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
- if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
- return IRQ_HANDLED;
-
i2c_hid_get_input(ihid);
return IRQ_HANDLED;
@@ -735,12 +726,15 @@ static int i2c_hid_parse(struct hid_device *hid)
mutex_lock(&ihid->reset_lock);
do {
ret = i2c_hid_start_hwreset(ihid);
- if (ret)
+ if (ret == 0)
+ ret = i2c_hid_finish_hwreset(ihid);
+ else
msleep(1000);
} while (tries-- > 0 && ret);
+ mutex_unlock(&ihid->reset_lock);
if (ret)
- goto abort_reset;
+ return ret;
use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
&rsize);
@@ -750,11 +744,8 @@ static int i2c_hid_parse(struct hid_device *hid)
i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
} else {
rdesc = kzalloc(rsize, GFP_KERNEL);
-
- if (!rdesc) {
- ret = -ENOMEM;
- goto abort_reset;
- }
+ if (!rdesc)
+ return -ENOMEM;
i2c_hid_dbg(ihid, "asking HID report descriptor\n");
@@ -763,23 +754,10 @@ static int i2c_hid_parse(struct hid_device *hid)
rdesc, rsize);
if (ret) {
hid_err(hid, "reading report descriptor failed\n");
- goto abort_reset;
+ goto out;
}
}
- /*
- * Windows directly reads the report-descriptor after sending reset
- * and then waits for resets completion afterwards. Some touchpads
- * actually wait for the report-descriptor to be read before signalling
- * reset completion.
- */
- ret = i2c_hid_finish_hwreset(ihid);
-abort_reset:
- clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
- mutex_unlock(&ihid->reset_lock);
- if (ret)
- goto out;
-
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize);
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index a49c6affd7c4..dd5fc60874ba 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
+ dev->devc = &pdev->dev;
ishtp_device_init(dev);
init_waitqueue_head(&dev->wait_hw_ready);
@@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
}
dev->ops = &ish_hw_ops;
- dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
return dev;
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index adbf674355b2..fb8cd8469328 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -153,7 +153,9 @@ void vmbus_free_ring(struct vmbus_channel *channel)
hv_ringbuffer_cleanup(&channel->inbound);
if (channel->ringbuffer_page) {
- __free_pages(channel->ringbuffer_page,
+ /* In a CoCo VM, leak the memory if it didn't get re-encrypted */
+ if (!channel->ringbuffer_gpadlhandle.decrypted)
+ __free_pages(channel->ringbuffer_page,
get_order(channel->ringbuffer_pagecount
<< PAGE_SHIFT));
channel->ringbuffer_page = NULL;
@@ -436,9 +438,18 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
- if (ret)
+ if (ret) {
+ gpadl->decrypted = false;
return ret;
+ }
+ /*
+ * Set the "decrypted" flag to true for the set_memory_decrypted()
+ * success case. In the failure case, the encryption state of the
+ * memory is unknown. Leave "decrypted" as true to ensure the
+ * memory will be leaked instead of going back on the free list.
+ */
+ gpadl->decrypted = true;
ret = set_memory_decrypted((unsigned long)kbuffer,
PFN_UP(size));
if (ret) {
@@ -527,9 +538,15 @@ cleanup:
kfree(msginfo);
- if (ret)
- set_memory_encrypted((unsigned long)kbuffer,
- PFN_UP(size));
+ if (ret) {
+ /*
+ * If set_memory_encrypted() fails, the decrypted flag is
+ * left as true so the memory is leaked instead of being
+ * put back on the free list.
+ */
+ if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
+ gpadl->decrypted = false;
+ }
return ret;
}
@@ -850,6 +867,8 @@ post_msg_err:
if (ret)
pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
+ gpadl->decrypted = ret;
+
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
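
[Editor's note] A sketch of the CoCo-VM pattern these hunks apply, using hypothetical names: once set_memory_decrypted() has been attempted, a buffer is freed only if it is known to be encrypted again; if re-encryption fails, its state is unknown and it is deliberately leaked rather than returned to the allocator.

#include <linux/set_memory.h>
#include <linux/mm.h>

static void example_release(void *buf, unsigned int npages, bool decrypted)
{
        /* Failed re-encryption leaves the state unknown: leak the pages. */
        if (decrypted && set_memory_encrypted((unsigned long)buf, npages))
                return;

        free_pages((unsigned long)buf, get_order(npages << PAGE_SHIFT));
}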
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 3cabeeabb1ca..f001ae880e1d 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -237,8 +237,17 @@ int vmbus_connect(void)
vmbus_connection.monitor_pages[0], 1);
ret |= set_memory_decrypted((unsigned long)
vmbus_connection.monitor_pages[1], 1);
- if (ret)
+ if (ret) {
+ /*
+ * If set_memory_decrypted() fails, the encryption state
+ * of the memory is unknown. So leak the memory instead
+ * of risking returning decrypted memory to the free list.
+ * For simplicity, always handle both pages the same.
+ */
+ vmbus_connection.monitor_pages[0] = NULL;
+ vmbus_connection.monitor_pages[1] = NULL;
goto cleanup;
+ }
/*
* Set_memory_decrypted() will change the memory contents if
@@ -337,13 +346,19 @@ void vmbus_disconnect(void)
vmbus_connection.int_page = NULL;
}
- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1);
+ if (vmbus_connection.monitor_pages[0]) {
+ if (!set_memory_encrypted(
+ (unsigned long)vmbus_connection.monitor_pages[0], 1))
+ hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+ vmbus_connection.monitor_pages[0] = NULL;
+ }
- hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
- hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
- vmbus_connection.monitor_pages[0] = NULL;
- vmbus_connection.monitor_pages[1] = NULL;
+ if (vmbus_connection.monitor_pages[1]) {
+ if (!set_memory_encrypted(
+ (unsigned long)vmbus_connection.monitor_pages[1], 1))
+ hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+ vmbus_connection.monitor_pages[1] = NULL;
+ }
}
/*
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4cb17603a828..12a707ab73f8 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -131,7 +131,7 @@ static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);
@@ -142,7 +142,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->state);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);
@@ -153,7 +153,7 @@ static ssize_t monitor_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);
@@ -164,8 +164,8 @@ static ssize_t class_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "{%pUl}\n",
- &hv_dev->channel->offermsg.offer.if_type);
+ return sysfs_emit(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);
@@ -176,8 +176,8 @@ static ssize_t device_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "{%pUl}\n",
- &hv_dev->channel->offermsg.offer.if_instance);
+ return sysfs_emit(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);
@@ -186,7 +186,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
+ return sysfs_emit(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
@@ -199,7 +199,7 @@ static ssize_t numa_node_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
+ return sysfs_emit(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
@@ -212,9 +212,8 @@ static ssize_t server_monitor_pending_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
@@ -226,9 +225,8 @@ static ssize_t client_monitor_pending_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);
@@ -240,9 +238,8 @@ static ssize_t server_monitor_latency_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_latency(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);
@@ -254,9 +251,8 @@ static ssize_t client_monitor_latency_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_latency(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);
@@ -268,9 +264,8 @@ static ssize_t server_monitor_conn_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_conn_id(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);
@@ -282,9 +277,8 @@ static ssize_t client_monitor_conn_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_conn_id(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
@@ -303,7 +297,7 @@ static ssize_t out_intr_mask_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+ return sysfs_emit(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);
@@ -321,7 +315,7 @@ static ssize_t out_read_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_read_index);
+ return sysfs_emit(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
@@ -340,7 +334,7 @@ static ssize_t out_write_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_write_index);
+ return sysfs_emit(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
@@ -359,7 +353,7 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+ return sysfs_emit(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -378,7 +372,7 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+ return sysfs_emit(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -396,7 +390,7 @@ static ssize_t in_intr_mask_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+ return sysfs_emit(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);
@@ -414,7 +408,7 @@ static ssize_t in_read_index_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_read_index);
+ return sysfs_emit(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);
@@ -432,7 +426,7 @@ static ssize_t in_write_index_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_write_index);
+ return sysfs_emit(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);
@@ -451,7 +445,7 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+ return sysfs_emit(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -470,7 +464,7 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+ return sysfs_emit(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
@@ -480,7 +474,7 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
- int buf_size = PAGE_SIZE, n_written, tot_written;
+ int n_written;
struct list_head *cur;
if (!channel)
@@ -488,25 +482,21 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
mutex_lock(&vmbus_connection.channel_mutex);
- tot_written = snprintf(buf, buf_size, "%u:%u\n",
- channel->offermsg.child_relid, channel->target_cpu);
+ n_written = sysfs_emit(buf, "%u:%u\n",
+ channel->offermsg.child_relid,
+ channel->target_cpu);
list_for_each(cur, &channel->sc_list) {
- if (tot_written >= buf_size - 1)
- break;
cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
- n_written = scnprintf(buf + tot_written,
- buf_size - tot_written,
- "%u:%u\n",
- cur_sc->offermsg.child_relid,
- cur_sc->target_cpu);
- tot_written += n_written;
+ n_written += sysfs_emit_at(buf, n_written, "%u:%u\n",
+ cur_sc->offermsg.child_relid,
+ cur_sc->target_cpu);
}
mutex_unlock(&vmbus_connection.channel_mutex);
- return tot_written;
+ return n_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
@@ -516,7 +506,7 @@ static ssize_t vendor_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+ return sysfs_emit(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);
@@ -526,7 +516,7 @@ static ssize_t device_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "0x%x\n", hv_dev->device_id);
+ return sysfs_emit(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
@@ -551,7 +541,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
+ len = sysfs_emit(buf, "%s\n", hv_dev->driver_override);
device_unlock(dev);
return len;
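
[Editor's note] The conversions above rely on sysfs_emit() knowing that a sysfs show buffer is always a full page, so it bounds the write itself and the manual buf_size/tot_written bookkeeping can go. A minimal sketch with a hypothetical attribute, using sysfs_emit_at() to append for multi-line output:

#include <linux/sysfs.h>
#include <linux/device.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        int n = sysfs_emit(buf, "header\n");

        /* Append at the current offset; still bounded to PAGE_SIZE. */
        n += sysfs_emit_at(buf, n, "value: %d\n", 42);
        return n;
}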
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index a6861660cb8c..79870dd7a014 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -536,11 +536,12 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
if (read_write == I2C_SMBUS_READ ||
command == I2C_SMBUS_BLOCK_PROC_CALL) {
- status = i801_get_block_len(priv);
- if (status < 0)
+ len = i801_get_block_len(priv);
+ if (len < 0) {
+ status = len;
goto out;
+ }
- len = status;
data->block[0] = len;
inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 76f79b68cef8..888ca636f3f3 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -324,6 +324,7 @@ static void decode_ISR(unsigned int val)
decode_bits(KERN_DEBUG "ISR", isr_bits, ARRAY_SIZE(isr_bits), val);
}
+#ifdef CONFIG_I2C_PXA_SLAVE
static const struct bits icr_bits[] = {
PXA_BIT(ICR_START, "START", NULL),
PXA_BIT(ICR_STOP, "STOP", NULL),
@@ -342,7 +343,6 @@ static const struct bits icr_bits[] = {
PXA_BIT(ICR_UR, "UR", "ur"),
};
-#ifdef CONFIG_I2C_PXA_SLAVE
static void decode_ICR(unsigned int val)
{
decode_bits(KERN_DEBUG "ICR", icr_bits, ARRAY_SIZE(icr_bits), val);
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index ff5c486a1dbb..db0d1ac82910 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2200,13 +2200,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
- * takes place. adap->algo->master_xfer existence isn't checked.
+ * takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
+ if (!adap->algo->master_xfer) {
+ dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@@ -2273,11 +2278,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
- if (!adap->algo->master_xfer) {
- dev_dbg(&adap->dev, "I2C level transfers not supported\n");
- return -EOPNOTSUPP;
- }
-
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index bf0df6ee4f78..07fb8d3c037f 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
-static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+ enum ib_cm_state old_state)
{
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
- cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
+ enum ib_cm_state old_state;
struct cm_work *work;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
+ old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
@@ -1151,7 +1154,7 @@ retest:
msecs_to_jiffies(
CM_DESTROY_ID_WAIT_TIMEOUT));
if (!ret) /* timeout happened */
- cm_destroy_id_wait_timeout(cm_id);
+ cm_destroy_id_wait_timeout(cm_id, old_state);
} while (!ret);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 0c3c4e64812c..3e43687a7f6f 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
- if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+ !mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index ae466e72fc43..255677bc12b2 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
+
+ mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 5d1010cafed8..7e9b996b47c8 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
path->num_nodes = num_nodes;
+ mutex_lock(&icc_bw_lock);
+
for (i = num_nodes - 1; i >= 0; i--) {
node->provider->users++;
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
node = node->reverse;
}
+ mutex_unlock(&icc_bw_lock);
+
return path;
}
@@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
pr_err("%s: error (%d)\n", __func__, ret);
mutex_lock(&icc_lock);
+ mutex_lock(&icc_bw_lock);
+
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users))
node->provider->users--;
}
+
+ mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
kfree_const(path->name);
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index 99824675ee3f..654abb9ce08e 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -116,15 +116,6 @@ static struct qcom_icc_node xm_sdc2 = {
.links = { X1E80100_SLAVE_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_master = {
- .name = "ddr_perf_mode_master",
- .id = X1E80100_MASTER_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 1,
- .links = { X1E80100_SLAVE_DDR_PERF_MODE },
-};
-
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = X1E80100_MASTER_QUP_CORE_0,
@@ -688,14 +679,6 @@ static struct qcom_icc_node qns_a2noc_snoc = {
.links = { X1E80100_MASTER_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_slave = {
- .name = "ddr_perf_mode_slave",
- .id = X1E80100_SLAVE_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 0,
-};
-
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = X1E80100_SLAVE_QUP_CORE_0,
@@ -1377,12 +1360,6 @@ static struct qcom_icc_bcm bcm_acv = {
.nodes = { &ebi },
};
-static struct qcom_icc_bcm bcm_acv_perf = {
- .name = "ACV_PERF",
- .num_nodes = 1,
- .nodes = { &ddr_perf_mode_slave },
-};
-
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
@@ -1583,18 +1560,15 @@ static const struct qcom_icc_desc x1e80100_aggre2_noc = {
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
- &bcm_acv_perf,
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
- [MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
- [SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index e7a44929f0da..ac6754a85f35 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -3228,30 +3228,33 @@ out:
static void iommu_snp_enable(void)
{
#ifdef CONFIG_KVM_AMD_SEV
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return;
/*
* The SNP support requires that IOMMU must be enabled, and is
- * not configured in the passthrough mode.
+ * configured with V1 page table (DTE[Mode] = 0 is not supported).
*/
if (no_iommu || iommu_default_passthrough()) {
- pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
- return;
+ pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
+ goto disable_snp;
}
amd_iommu_snp_en = check_feature(FEATURE_SNP);
if (!amd_iommu_snp_en) {
- pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
- return;
+ pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+ goto disable_snp;
}
pr_info("IOMMU SNP support enabled.\n");
+ return;
- /* Enforce IOMMU v1 pagetable when SNP is enabled. */
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
- pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
- }
+disable_snp:
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
#endif
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index d35c1b8c8e65..e692217fcb28 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1692,26 +1692,29 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
static u16 domain_id_alloc(void)
{
+ unsigned long flags;
int id;
- spin_lock(&pd_bitmap_lock);
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
BUG_ON(id == 0);
if (id > 0 && id < MAX_DOMAIN_ID)
__set_bit(id, amd_iommu_pd_alloc_bitmap);
else
id = 0;
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
return id;
}
static void domain_id_free(int id)
{
- spin_lock(&pd_bitmap_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
}
static void free_gcr3_tbl_level1(u64 *tbl)
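
[Editor's note] The hunk above switches the domain-ID bitmap lock to the irqsave variants so it can safely be taken from contexts that may run with interrupts disabled. A self-contained sketch of the same allocator shape, with made-up names:

#include <linux/spinlock.h>
#include <linux/bitmap.h>

#define EXAMPLE_MAX_ID 1024

static DEFINE_SPINLOCK(example_bitmap_lock);
static DECLARE_BITMAP(example_id_bitmap, EXAMPLE_MAX_ID);

static int example_id_alloc(void)
{
        unsigned long flags;
        int id;

        /* irqsave restores the caller's IRQ state, whatever it was. */
        spin_lock_irqsave(&example_bitmap_lock, flags);
        id = find_first_zero_bit(example_id_bitmap, EXAMPLE_MAX_ID);
        if (id < EXAMPLE_MAX_ID)
                __set_bit(id, example_id_bitmap);
        else
                id = -1;        /* bitmap exhausted */
        spin_unlock_irqrestore(&example_bitmap_lock, flags);

        return id;
}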
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5ed036225e69..41f93c3ab160 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1139,7 +1139,8 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
* requires a breaking update, zero the V bit, write all qwords
* but 0, then set qword 0
*/
- unused_update.data[0] = entry->data[0] & (~STRTAB_STE_0_V);
+ unused_update.data[0] = entry->data[0] &
+ cpu_to_le64(~STRTAB_STE_0_V);
entry_set(smmu, sid, entry, &unused_update, 0, 1);
entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
entry_set(smmu, sid, entry, target, 0, 1);
@@ -1453,14 +1454,17 @@ static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
}
-static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
+static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *target)
{
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
- target->data[1] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
}
static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
@@ -1523,6 +1527,7 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
&pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
u64 vtcr_val;
+ struct arm_smmu_device *smmu = master->smmu;
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
@@ -1531,9 +1536,11 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
target->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_EATS,
- master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) |
- FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
@@ -1560,7 +1567,8 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
* This can safely directly manipulate the STE memory without a sync sequence
* because the STE table has not been installed in the SMMU yet.
*/
-static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
+static void arm_smmu_init_initial_stes(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *strtab,
unsigned int nent)
{
unsigned int i;
@@ -1569,7 +1577,7 @@ static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
if (disable_bypass)
arm_smmu_make_abort_ste(strtab);
else
- arm_smmu_make_bypass_ste(strtab);
+ arm_smmu_make_bypass_ste(smmu, strtab);
strtab++;
}
}
@@ -1597,7 +1605,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
+ arm_smmu_init_initial_stes(smmu, desc->l2ptr, 1 << STRTAB_SPLIT);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -2637,8 +2645,9 @@ static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
struct device *dev)
{
struct arm_smmu_ste ste;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- arm_smmu_make_bypass_ste(&ste);
+ arm_smmu_make_bypass_ste(master->smmu, &ste);
return arm_smmu_attach_dev_ste(dev, &ste);
}
@@ -3264,7 +3273,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
+ arm_smmu_init_initial_stes(smmu, strtab, cfg->num_l1_ents);
return 0;
}
@@ -3777,6 +3786,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
return -ENXIO;
}
+ if (reg & IDR1_ATTR_TYPES_OVR)
+ smmu->features |= ARM_SMMU_FEAT_ATTR_TYPES_OVR;
+
/* Queue sizes, capped to ensure natural alignment */
smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
FIELD_GET(IDR1_CMDQS, reg));
@@ -3992,7 +4004,7 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
* STE table is not programmed to HW, see
* arm_smmu_init_initial_stes()
*/
- arm_smmu_make_bypass_ste(
+ arm_smmu_make_bypass_ste(smmu,
arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
}
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 23baf117e7e4..2a19bb63e5c6 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -44,6 +44,7 @@
#define IDR1_TABLES_PRESET (1 << 30)
#define IDR1_QUEUES_PRESET (1 << 29)
#define IDR1_REL (1 << 28)
+#define IDR1_ATTR_TYPES_OVR (1 << 27)
#define IDR1_CMDQS GENMASK(25, 21)
#define IDR1_EVTQS GENMASK(20, 16)
#define IDR1_PRIQS GENMASK(15, 11)
@@ -647,6 +648,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_SVA (1 << 17)
#define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19)
+#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 50eb9aed47cc..a7ecd90303dc 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4299,9 +4299,11 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
- ret = device_rbtree_insert(iommu, info);
- if (ret)
- goto free;
+ if (pdev && pci_ats_supported(pdev)) {
+ ret = device_rbtree_insert(iommu, info);
+ if (ret)
+ goto free;
+ }
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
@@ -4336,7 +4338,8 @@ static void intel_iommu_release_device(struct device *dev)
struct intel_iommu *iommu = info->iommu;
mutex_lock(&iommu->iopf_lock);
- device_rbtree_remove(info);
+ if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
+ device_rbtree_remove(info);
mutex_unlock(&iommu->iopf_lock);
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
index cf43e798eca4..44083d01852d 100644
--- a/drivers/iommu/intel/perfmon.c
+++ b/drivers/iommu/intel/perfmon.c
@@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
iommu_pmu_set_filter(domain, event->attr.config1,
IOMMU_PMU_FILTER_DOMAIN, idx,
event->attr.config1);
- iommu_pmu_set_filter(pasid, event->attr.config1,
+ iommu_pmu_set_filter(pasid, event->attr.config2,
IOMMU_PMU_FILTER_PASID, idx,
event->attr.config1);
iommu_pmu_set_filter(ats, event->attr.config2,
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index c1bed89b1026..ee3b469e2da1 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -66,7 +66,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
struct page *pages;
int irq, ret;
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 098869007c69..a95a483def2d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3354,6 +3354,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
{
/* Caller must be a probed driver on dev */
struct iommu_group *group = dev->iommu_group;
+ struct group_device *device;
void *curr;
int ret;
@@ -3363,10 +3364,18 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
if (!group)
return -ENODEV;
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+ if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
+ pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
+ for_each_group_device(group, device) {
+ if (pasid >= device->dev->iommu->max_pasids) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index 99d4b075df49..76656fe0470d 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -37,6 +37,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
+ select IOMMUFD_DRIVER
default n
help
This is dangerous, do not enable unless running
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index b8c47f18bc26..6a2707fe7a78 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1790,6 +1790,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
{}
};
+MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids);
static struct platform_driver mtk_iommu_driver = {
.probe = mtk_iommu_probe,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a9fa2a54dc9b..d6e4002200bd 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -600,6 +600,7 @@ static const struct of_device_id mtk_iommu_v1_of_ids[] = {
{ .compatible = "mediatek,mt2701-m4u", },
{}
};
+MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);
static const struct component_master_ops mtk_iommu_v1_com_ops = {
.bind = mtk_iommu_v1_bind,
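
[Editor's note] The two MODULE_DEVICE_TABLE() additions above matter because without them the OF match table is never exported as a modalias, so udev cannot autoload the driver when it is built as a module. A sketch with a made-up compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_of_ids[] = {
        { .compatible = "vendor,example-block" },
        { /* sentinel */ }
};
/* Emits the modalias entries that let udev autoload this module. */
MODULE_DEVICE_TABLE(of, example_of_ids);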
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index a55528469278..4b021a67bdfe 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -316,7 +316,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
return 0;
}
#else
-static void armada_370_xp_msi_reenable_percpu(void) {}
+static __maybe_unused void armada_370_xp_msi_reenable_percpu(void) {}
static inline int armada_370_xp_msi_init(struct device_node *node,
phys_addr_t main_int_phys_base)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index fca888b36680..5f7d3db3afd8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -786,6 +786,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
unsigned long vpt_addr, vconf_addr;
u64 target;
bool alloc;
@@ -798,6 +799,11 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
if (is_v4_1(its)) {
alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
its_encode_alloc(cmd, alloc);
+ /*
+ * Unmapping a VPE is self-synchronizing on GICv4.1,
+ * no need to issue a VSYNC.
+ */
+ vpe = NULL;
}
goto out;
@@ -832,7 +838,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
out:
its_fixup_cmd(cmd);
- return valid_vpe(its, desc->its_vmapp_cmd.vpe);
+ return vpe;
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -4561,13 +4567,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
}
- if (err) {
- if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i);
-
- its_lpi_free(bitmap, base, nr_ids);
- its_free_prop_table(vprop_page);
- }
+ if (err)
+ its_vpe_irq_domain_free(domain, virq, i);
return err;
}
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 2776ca5fc33f..b215b28cad7b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -401,23 +401,23 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
static int data_sock_setsockopt(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int len)
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0, opt = 0;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock,
- level, optname, len);
+ level, optname, optlen);
lock_sock(sk);
switch (optname) {
case MISDN_TIME_STAMP:
- if (copy_from_sockptr(&opt, optval, sizeof(int))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt),
+ optval, optlen);
+ if (err)
break;
- }
if (opt)
_pms(sk)->cmask |= MISDN_TIME_STAMP;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 37b9f8f1ae1a..7f3dc8ee6ab8 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4221,7 +4221,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
- if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument";
goto bad;
diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
index 00c9b9c05001..3a989efae142 100644
--- a/drivers/md/dm-vdo/murmurhash3.c
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -8,33 +8,14 @@
#include "murmurhash3.h"
+#include <asm/unaligned.h>
+
static inline u64 rotl64(u64 x, s8 r)
{
return (x << r) | (x >> (64 - r));
}
#define ROTL64(x, y) rotl64(x, y)
-static __always_inline u64 getblock64(const u64 *p, int i)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- return p[i];
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- return __builtin_bswap64(p[i]);
-#else
-#error "can't figure out byte order"
-#endif
-}
-
-static __always_inline void putblock64(u64 *p, int i, u64 value)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- p[i] = value;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- p[i] = __builtin_bswap64(value);
-#else
-#error "can't figure out byte order"
-#endif
-}
/* Finalization mix - force all bits of a hash block to avalanche */
@@ -60,6 +41,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
const u64 c1 = 0x87c37b91114253d5LLU;
const u64 c2 = 0x4cf5ad432745937fLLU;
+ u64 *hash_out = out;
+
/* body */
const u64 *blocks = (const u64 *)(data);
@@ -67,8 +50,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
int i;
for (i = 0; i < nblocks; i++) {
- u64 k1 = getblock64(blocks, i * 2 + 0);
- u64 k2 = getblock64(blocks, i * 2 + 1);
+ u64 k1 = get_unaligned_le64(&blocks[i * 2]);
+ u64 k2 = get_unaligned_le64(&blocks[i * 2 + 1]);
k1 *= c1;
k1 = ROTL64(k1, 31);
@@ -154,7 +137,7 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
break;
default:
break;
- };
+ }
}
/* finalization */
@@ -170,6 +153,6 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
h1 += h2;
h2 += h1;
- putblock64((u64 *)out, 0, h1);
- putblock64((u64 *)out, 1, h2);
+ put_unaligned_le64(h1, &hash_out[0]);
+ put_unaligned_le64(h2, &hash_out[1]);
}
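
[Editor's note] The murmurhash3 change replaces the hand-rolled byte-order blocks with get_unaligned_le64()/put_unaligned_le64(), which also drop the implicit assumption that the input is 64-bit aligned. A portable C sketch of what those helpers do semantically (the kernel versions in <asm/unaligned.h> may compile to a single load on many architectures):

#include <stdint.h>
#include <string.h>

static uint64_t load_le64(const void *p)
{
        unsigned char b[8];
        uint64_t v = 0;

        memcpy(b, p, 8);                        /* no alignment assumed */
        for (int i = 7; i >= 0; i--)
                v = (v << 8) | b[i];            /* byte 0 is the LSB */
        return v;
}

static void store_le64(void *p, uint64_t v)
{
        unsigned char b[8];

        for (int i = 0; i < 8; i++) {
                b[i] = v & 0xff;                /* LSB written first */
                v >>= 8;
        }
        memcpy(p, b, 8);
}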
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 56aa2a8b9d71..7d0746b37c8e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -765,7 +765,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
return td;
out_blkdev_put:
- fput(bdev_file);
+ __fput_sync(bdev_file);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -778,7 +778,13 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- fput(td->dm_dev.bdev_file);
+
+ /* Leverage async fput() if DMF_DEFERRED_REMOVE is set */
+ if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+ fput(td->dm_dev.bdev_file);
+ else
+ __fput_sync(td->dm_dev.bdev_file);
+
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index be8ac24f50b6..7b8a71ca66dd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1558,7 +1558,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
- free_r1bio(r1_bio);
+ mempool_free(r1_bio, &conf->r1bio_pool);
allow_barrier(conf, bio->bi_iter.bi_sector);
if (bio->bi_opf & REQ_NOWAIT) {
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
index 4c34344dc7dc..d7027d600208 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
@@ -50,12 +50,12 @@ static void mtk_vcodec_vpu_reset_dec_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
@@ -65,12 +65,12 @@ static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
index f47c98faf068..2073781ccadb 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
@@ -268,7 +268,9 @@ static int fops_vcodec_open(struct file *file)
ctx->dev->vdec_pdata->init_vdec_params(ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mtk_vcodec_dbgfs_create(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -311,7 +313,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
mtk_vcodec_dbgfs_remove(dev, ctx->id);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -404,6 +408,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
for (i = 0; i < MTK_VDEC_HW_MAX; i++)
mutex_init(&dev->dec_mutex[i]);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
index 849b89dd205c..85b2c0d3d8bc 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -241,6 +241,7 @@ struct mtk_vcodec_dec_ctx {
*
* @dec_mutex: decoder hardware lock
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: the lock protecting the context list
* @decode_workqueue: decode work queue
*
* @irqlock: protect data access by irq handler and work thread
@@ -282,6 +283,7 @@ struct mtk_vcodec_dec_dev {
/* decoder hardware mutex lock */
struct mutex dec_mutex[MTK_VDEC_HW_MAX];
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *decode_workqueue;
spinlock_t irqlock;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
index 06ed47df693b..21836dd6ef85 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
@@ -869,7 +869,6 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
inst->vpu.codec_type = ctx->current_codec;
inst->vpu.capture_type = ctx->capture_fourcc;
- ctx->drv_handle = inst;
err = vpu_dec_init(&inst->vpu);
if (err) {
mtk_vdec_err(ctx, "vdec_hevc init err=%d", err);
@@ -898,6 +897,7 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
mtk_vdec_debug(ctx, "lat hevc instance >> %p, codec_type = 0x%x",
inst, inst->vpu.codec_type);
+ ctx->drv_handle = inst;
return 0;
error_free_inst:
kfree(inst);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
index 19407f9bc773..987b3d71b662 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
@@ -449,7 +449,7 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
inst->cur_fb = fb;
- dec->bs_dma = (unsigned long)bs->dma_addr;
+ dec->bs_dma = (uint64_t)bs->dma_addr;
dec->bs_sz = bs->size;
dec->cur_y_fb_dma = y_fb_dma;
dec->cur_c_fb_dma = c_fb_dma;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
index 55355fa70090..039082f600c8 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
@@ -16,6 +16,7 @@
#include "../vdec_drv_base.h"
#include "../vdec_vpu_if.h"
+#define VP9_MAX_SUPER_FRAMES_NUM 8
#define VP9_SUPER_FRAME_BS_SZ 64
#define MAX_VP9_DPB_SIZE 9
@@ -133,11 +134,11 @@ struct vp9_sf_ref_fb {
*/
struct vdec_vp9_vsi {
unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ];
- struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_FRM_BUF_NUM-1];
+ struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_SUPER_FRAMES_NUM];
int sf_next_ref_fb_idx;
unsigned int sf_frm_cnt;
- unsigned int sf_frm_offset[VP9_MAX_FRM_BUF_NUM-1];
- unsigned int sf_frm_sz[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_offset[VP9_MAX_SUPER_FRAMES_NUM];
+ unsigned int sf_frm_sz[VP9_MAX_SUPER_FRAMES_NUM];
unsigned int sf_frm_idx;
unsigned int sf_init;
struct vdec_fb fb;
@@ -526,7 +527,7 @@ static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
/* if this is a super frame and it is not the last sub-frame, get the
* next fb for sub-frame decode
*/
- if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt - 1)
+ if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt)
vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
}
@@ -735,7 +736,7 @@ static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
static int validate_vsi_array_indexes(struct vdec_vp9_inst *inst,
struct vdec_vp9_vsi *vsi) {
- if (vsi->sf_frm_idx >= VP9_MAX_FRM_BUF_NUM - 1) {
+ if (vsi->sf_frm_idx > VP9_MAX_SUPER_FRAMES_NUM) {
mtk_vdec_err(inst->ctx, "Invalid vsi->sf_frm_idx=%u.", vsi->sf_frm_idx);
return -EIO;
}
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
index cf48d09b78d7..eea709d93820 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
@@ -1074,7 +1074,7 @@ static int vdec_vp9_slice_setup_tile_buffer(struct vdec_vp9_slice_instance *inst
unsigned int mi_row;
unsigned int mi_col;
unsigned int offset;
- unsigned int pa;
+ dma_addr_t pa;
unsigned int size;
struct vdec_vp9_slice_tiles *tiles;
unsigned char *pos;
@@ -1109,7 +1109,7 @@ static int vdec_vp9_slice_setup_tile_buffer(struct vdec_vp9_slice_instance *inst
pos = va + offset;
end = va + bs->size;
/* truncated */
- pa = (unsigned int)bs->dma_addr + offset;
+ pa = bs->dma_addr + offset;
tb = instance->tile.va;
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
index 82e57ae983d5..da6be556727b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
@@ -77,12 +77,14 @@ static bool vpu_dec_check_ap_inst(struct mtk_vcodec_dec_dev *dec_dev, struct vde
struct mtk_vcodec_dec_ctx *ctx;
int ret = false;
+ mutex_lock(&dec_dev->dev_ctx_lock);
list_for_each_entry(ctx, &dec_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&dec_dev->dev_ctx_lock);
return ret;
}
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
index 6319f24bc714..3cb8a1622222 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
@@ -177,7 +177,9 @@ static int fops_vcodec_open(struct file *file)
mtk_v4l2_venc_dbg(2, ctx, "Create instance [%d]@%p m2m_ctx=%p ",
ctx->id, ctx, ctx->m2m_ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mutex_unlock(&dev->dev_mutex);
mtk_v4l2_venc_dbg(0, ctx, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
@@ -212,7 +214,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -294,6 +298,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
mutex_init(&dev->enc_mutex);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
index a042f607ed8d..0bd85d0fb379 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
@@ -178,6 +178,7 @@ struct mtk_vcodec_enc_ctx {
*
* @enc_mutex: encoder hardware lock.
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: protects the context list
* @encode_workqueue: encode work queue
*
* @enc_irq: h264 encoder irq resource
@@ -205,6 +206,7 @@ struct mtk_vcodec_enc_dev {
/* encoder hardware mutex lock */
struct mutex enc_mutex;
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *encode_workqueue;
int enc_irq;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
index 84ad1cc6ad17..51bb7ee141b9 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
@@ -47,12 +47,14 @@ static bool vpu_enc_check_ap_inst(struct mtk_vcodec_enc_dev *enc_dev, struct ven
struct mtk_vcodec_enc_ctx *ctx;
int ret = false;
+ mutex_lock(&enc_dev->dev_ctx_lock);
list_for_each_entry(ctx, &enc_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&enc_dev->dev_ctx_lock);
return ret;
}
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 1a64364700eb..0ad2ff9065aa 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1002,7 +1002,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
} else {
pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST;
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 572333ead5fb..4bd4f32bcdab 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -758,15 +758,6 @@ static int at24_probe(struct i2c_client *client)
}
pm_runtime_enable(dev);
- at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(at24->nvmem)) {
- pm_runtime_disable(dev);
- if (!pm_runtime_status_suspended(dev))
- regulator_disable(at24->vcc_reg);
- return dev_err_probe(dev, PTR_ERR(at24->nvmem),
- "failed to register nvmem\n");
- }
-
/*
* Perform a one-byte test read to verify that the chip is functional,
* unless powering on the device is to be avoided during probe (i.e.
@@ -782,6 +773,15 @@ static int at24_probe(struct i2c_client *client)
}
}
+ at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+ if (IS_ERR(at24->nvmem)) {
+ pm_runtime_disable(dev);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
+ return dev_err_probe(dev, PTR_ERR(at24->nvmem),
+ "failed to register nvmem\n");
+ }
+
/* If this a SPD EEPROM, probe for DDR3 thermal sensor */
if (cdata == &at24_data_spd)
at24_probe_temp_sensor(client);
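
The reordering above means a chip that fails the one-byte test read never becomes visible as an nvmem device. A minimal sketch of the same probe ordering, with demo_test_read() as a hypothetical stand-in for the functional check:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-provider.h>

static int demo_test_read(struct device *dev)
{
	/* stand-in for the one-byte test read; always succeeds here */
	return 0;
}

static int demo_probe(struct device *dev, const struct nvmem_config *cfg)
{
	struct nvmem_device *nvmem;
	int err;

	/* cheap functional check first ... */
	err = demo_test_read(dev);
	if (err)
		return dev_err_probe(dev, err, "chip not functional\n");

	/* ... externally visible registration only afterwards */
	nvmem = devm_nvmem_register(dev, cfg);
	if (IS_ERR(nvmem))
		return dev_err_probe(dev, PTR_ERR(nvmem),
				     "failed to register nvmem\n");

	return 0;
}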
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b5757993c9b2..c39718042e2e 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index 6c9f00bcb94b..b543e6b9f3cf 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -400,25 +400,40 @@ static void mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
mei_stop(mei_dev);
+ mei_disable_interrupts(mei_dev);
+
+ vsc_tp_free_irq(hw->tp);
+
return 0;
}
static int mei_vsc_resume(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
int ret;
- ret = mei_restart(mei_dev);
+ ret = vsc_tp_request_irq(hw->tp);
if (ret)
return ret;
+ ret = mei_restart(mei_dev);
+ if (ret)
+ goto err_free;
+
/* start timer if stopped in suspend */
schedule_delayed_work(&mei_dev->timer_work, HZ);
return 0;
+
+err_free:
+ vsc_tp_free_irq(hw->tp);
+
+ return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index ecfb70cd057c..e6a98dba8a73 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -94,6 +94,27 @@ static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
{}
};
+static irqreturn_t vsc_tp_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ atomic_inc(&tp->assert_cnt);
+
+ wake_up(&tp->xfer_wait);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ if (tp->event_notify)
+ tp->event_notify(tp->event_notify_context);
+
+ return IRQ_HANDLED;
+}
+
/* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
@@ -384,6 +405,37 @@ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
/**
+ * vsc_tp_request_irq - request irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+int vsc_tp_request_irq(struct vsc_tp *tp)
+{
+ struct spi_device *spi = tp->spi;
+ struct device *dev = &spi->dev;
+ int ret;
+
+ irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
+
+/**
+ * vsc_tp_free_irq - free irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_free_irq(struct vsc_tp *tp)
+{
+ free_irq(tp->spi->irq, tp);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
+
+/**
* vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
* @tp: vsc_tp device handle
*/
@@ -413,27 +465,6 @@ void vsc_tp_intr_disable(struct vsc_tp *tp)
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
-static irqreturn_t vsc_tp_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- atomic_inc(&tp->assert_cnt);
-
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- wake_up(&tp->xfer_wait);
-
- if (tp->event_notify)
- tp->event_notify(tp->event_notify_context);
-
- return IRQ_HANDLED;
-}
-
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
struct acpi_device **__adev = data;
@@ -490,10 +521,9 @@ static int vsc_tp_probe(struct spi_device *spi)
tp->spi = spi;
irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
- ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
- vsc_tp_thread_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), tp);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
if (ret)
return ret;
@@ -522,6 +552,8 @@ static int vsc_tp_probe(struct spi_device *spi)
err_destroy_lock:
mutex_destroy(&tp->mutex);
+ free_irq(spi->irq, tp);
+
return ret;
}
@@ -532,6 +564,8 @@ static void vsc_tp_remove(struct spi_device *spi)
platform_device_unregister(tp->pdev);
mutex_destroy(&tp->mutex);
+
+ free_irq(spi->irq, tp);
}
static const struct acpi_device_id vsc_tp_acpi_ids[] = {
diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h
index f9513ddc3e40..14ca195cbddc 100644
--- a/drivers/misc/mei/vsc-tp.h
+++ b/drivers/misc/mei/vsc-tp.h
@@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
void *context);
+int vsc_tp_request_irq(struct vsc_tp *tp);
+void vsc_tp_free_irq(struct vsc_tp *tp);
+
void vsc_tp_intr_enable(struct vsc_tp *tp);
void vsc_tp_intr_disable(struct vsc_tp *tp);
void vsc_tp_intr_synchronize(struct vsc_tp *tp);
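
The vsc-tp changes above replace a devm-managed IRQ with explicit request/free so that suspend can release the line and resume can re-acquire it before the device restarts. A minimal sketch of that pairing around a hypothetical demo struct, using the real request_threaded_irq()/free_irq() calls:

#include <linux/interrupt.h>

struct demo {
	int irq;
	void *cookie;	/* passed as dev_id to the handlers */
};

static int demo_suspend(struct demo *d)
{
	/* stop the device first (not shown), then release the line */
	free_irq(d->irq, d->cookie);
	return 0;
}

static int demo_resume(struct demo *d, irq_handler_t hard,
		       irq_handler_t thread)
{
	int ret;

	/* re-acquire the IRQ before restarting the device */
	ret = request_threaded_irq(d->irq, hard, thread,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   "demo", d->cookie);
	if (ret)
		return ret;

	/* device restart would follow here; free the IRQ again on failure */
	return 0;
}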
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 64a3492e8002..90c51b12148e 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -413,7 +413,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
struct mmc_blk_ioc_data *idata;
int err;
- idata = kmalloc(sizeof(*idata), GFP_KERNEL);
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
if (!idata) {
err = -ENOMEM;
goto out;
@@ -488,7 +488,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->flags & MMC_BLK_IOC_DROP)
return 0;
- if (idata->flags & MMC_BLK_IOC_SBC)
+ if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
prev_idata = idatas[i - 1];
/*
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b88d6dec209f..9a5f75163aca 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -300,6 +300,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
remain = sgm->length;
if (remain > host->data_len)
remain = host->data_len;
+ sgm->consumed = 0;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 088f8ed4fdc4..a8ee0df47148 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1114,10 +1114,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
host = slot->host;
- if (slot->vsd)
- gpiod_set_value(slot->vsd, power_on);
- if (slot->vio)
- gpiod_set_value(slot->vio, power_on);
+ if (power_on) {
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(1);
+ }
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(1);
+ }
+ } else {
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(50);
+ }
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(50);
+ }
+ }
if (slot->pdata->set_power != NULL)
slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
@@ -1254,18 +1269,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->pdata = &host->pdata->slots[id];
/* Check for some optional GPIO controls */
- slot->vsd = gpiod_get_index_optional(host->dev, "vsd",
- id, GPIOD_OUT_LOW);
+ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vsd))
return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
"error looking up VSD GPIO\n");
- slot->vio = gpiod_get_index_optional(host->dev, "vio",
- id, GPIOD_OUT_LOW);
+ slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vio))
return dev_err_probe(host->dev, PTR_ERR(slot->vio),
"error looking up VIO GPIO\n");
- slot->cover = gpiod_get_index_optional(host->dev, "cover",
- id, GPIOD_IN);
+ slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
+ id, GPIOD_IN);
if (IS_ERR(slot->cover))
return dev_err_probe(host->dev, PTR_ERR(slot->cover),
"error looking up cover switch GPIO\n");
@@ -1379,13 +1394,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
if (IS_ERR(host->virt_base))
return PTR_ERR(host->virt_base);
- host->slot_switch = gpiod_get_optional(host->dev, "switch",
- GPIOD_OUT_LOW);
- if (IS_ERR(host->slot_switch))
- return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
- "error looking up slot switch GPIO\n");
-
-
INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
@@ -1404,6 +1412,12 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->dev = &pdev->dev;
platform_set_drvdata(pdev, host);
+ host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(host->slot_switch))
+ return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
+ "error looking up slot switch GPIO\n");
+
host->id = pdev->id;
host->irq = irq;
host->phys_base = res->start;
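
Switching to the devm_ variants above removes the need for manual gpiod_put() on every error path. A sketch of the optional-GPIO lookup idiom for a hypothetical demo driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Device-managed lookup of an optional control GPIO. */
static int demo_get_gpios(struct device *dev, struct gpio_desc **vsd)
{
	*vsd = devm_gpiod_get_optional(dev, "vsd", GPIOD_OUT_LOW);
	if (IS_ERR(*vsd))
		return dev_err_probe(dev, PTR_ERR(*vsd),
				     "error looking up VSD GPIO\n");

	/* NULL (not an error) means the GPIO simply isn't wired up */
	return 0;
}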
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 668e0aceeeba..e113b99a3eab 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2694,6 +2694,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
/* Drop the performance vote */
dev_pm_opp_set_rate(dev, 0);
@@ -2708,6 +2713,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
@@ -2726,7 +2732,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
- return sdhci_msm_ice_resume(msm_host);
+ ret = sdhci_msm_ice_resume(msm_host);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
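
The sdhci-msm hunks bracket runtime PM with a runtime_suspended flag taken under host->lock, so concurrent paths can tell whether register access is safe. Reduced to a sketch with demo_* names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_host {
	spinlock_t lock;
	bool runtime_suspended;
};

/* Mark the host suspended before clocks and performance votes drop ... */
static void demo_runtime_suspend_mark(struct demo_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);
}

/* ... and clear it only once the hardware is fully re-powered. */
static void demo_runtime_resume_mark(struct demo_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = false;
	spin_unlock_irqrestore(&host->lock, flags);
}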
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index ab4b964d4058..f2e4a93ed1d6 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -626,6 +626,7 @@ static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
/* perform tuning */
sdhci_start_tuning(host);
+ host->tuning_loop_count = 128;
host->tuning_err = __sdhci_execute_tuning(host, opcode);
if (host->tuning_err) {
/* disable auto-tuning upon tuning error */
@@ -999,6 +1000,17 @@ free_pltfm:
return err;
}
+static void dwcmshc_disable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (ctrl & SDHCI_CLOCK_CARD_EN) {
+ ctrl &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
static void dwcmshc_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
@@ -1006,8 +1018,14 @@ static void dwcmshc_remove(struct platform_device *pdev)
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct rk35xx_priv *rk_priv = priv->priv;
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
sdhci_remove_host(host, 0);
+ dwcmshc_disable_card_clk(host);
+
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
@@ -1099,17 +1117,6 @@ static void dwcmshc_enable_card_clk(struct sdhci_host *host)
}
}
-static void dwcmshc_disable_card_clk(struct sdhci_host *host)
-{
- u16 ctrl;
-
- ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- if (ctrl & SDHCI_CLOCK_CARD_EN) {
- ctrl &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
- }
-}
-
static int dwcmshc_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
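
The dwcmshc_remove() change above pins the device active before teardown so nothing races runtime suspend, and only then gates the card clock ahead of the bus clocks. The ordering, reduced to a compilable sketch (driver-specific steps left as comments):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static void demo_remove(struct platform_device *pdev)
{
	/* keep the device powered and stop further runtime PM activity */
	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	/* 1) unregister the host while it is guaranteed powered
	 * 2) gate the card clock
	 * 3) gate the bus clocks last
	 */
}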
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index e78faef67d7a..94076b095571 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1439,6 +1439,9 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
+
if (omap_host->con != -EINVAL)
sdhci_runtime_suspend_host(host);
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 97a00ec9a4d4..caacdc0a3819 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -209,7 +209,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
if (dev->bdev_file) {
invalidate_mapping_pages(dev->bdev_file->f_mapping, 0, -1);
- fput(dev->bdev_file);
+ bdev_fput(dev->bdev_file);
}
kfree(dev);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5887feb347a4..0de87bc63840 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -900,7 +900,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.name = compatible;
config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
- config.add_legacy_fixed_of_cells = true;
+ config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
config.ignore_wp = true;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index a8d12c71f987..1b2ec0fec60c 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -857,7 +857,7 @@ static inline void brcmnand_read_data_bus(struct brcmnand_controller *ctrl,
struct brcmnand_soc *soc = ctrl->soc;
int i;
- if (soc->read_data_bus) {
+ if (soc && soc->read_data_bus) {
soc->read_data_bus(soc, flash_cache, buffer, fc_words);
} else {
for (i = 0; i < fc_words; i++)
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 5243fab9face..8db7fc424571 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
- 0xffffffff };
+};
static struct mtd_info *doclist = NULL;
@@ -1554,7 +1554,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
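
Dropping the 0xffffffff terminator in favour of ARRAY_SIZE() removes a magic value that could, in principle, collide with a real address. A self-contained sketch of the after-style iteration:

#include <linux/kernel.h>
#include <linux/types.h>

/* No sentinel needed: every element is a real probe address. */
static const unsigned long demo_locations[] = {
	0xc8000, 0xca000, 0xcc000, 0xce000,
};

static void demo_probe_all(void (*probe)(unsigned long))
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(demo_locations); i++)
		probe(demo_locations[i]);
}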
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index b079605c84d3..b8cff9240b28 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2815,7 +2815,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
host->cfg0_raw & ~(7 << CW_PER_PAGE));
nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
instrs = 3;
- } else {
+ } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
return 0;
}
@@ -2830,9 +2830,8 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
- 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
- NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == OP_BLOCK_ERASE)
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 767f66c37f6b..8090390edaf9 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -950,20 +950,173 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
mutex_unlock(&priv->reg_mutex);
}
-/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
- * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
- * must only be propagated to C-VLAN and MAC Bridge components. That means
- * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
- * these frames are supposed to be processed by the CPU (software). So we make
- * the switch only forward them to the CPU port. And if received from a CPU
- * port, forward to a single port. The software is responsible of making the
- * switch conform to the latter by setting a single port as destination port on
- * the special tag.
+/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
+ * of the Open Systems Interconnection basic reference model (OSI/RM) are
+ * described; the medium access control (MAC) and logical link control (LLC)
+ * sublayers. The MAC sublayer is the one facing the physical layer.
*
- * This switch intellectual property cannot conform to this part of the standard
- * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
- * DAs, it also includes :22-FF which the scope of propagation is not supposed
- * to be restricted for these MAC DAs.
+ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
+ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
+ * of the Bridge, at least two Ports, and higher layer entities with at least a
+ * Spanning Tree Protocol Entity included.
+ *
+ * Each Bridge Port also functions as an end station and shall provide the MAC
+ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
+ * distinct LLC Entity that supports protocol identification, multiplexing, and
+ * demultiplexing, for protocol data unit (PDU) transmission and reception by
+ * one or more higher layer entities.
+ *
+ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
+ * Entity associated with each Bridge Port is modeled as being directly
+ * connected to the attached Local Area Network (LAN).
+ *
+ * On the switch with CPU port architecture, CPU port functions as Management
+ * Port, and the Management Port functionality is provided by software which
+ * functions as an end station. Software is connected to an IEEE 802 LAN that is
+ * wholly contained within the system that incorporates the Bridge. Software
+ * provides access to the LLC Entity associated with each Bridge Port by the
+ * value of the source port field on the special tag on the frame received by
+ * software.
+ *
+ * We call frames that carry control information to determine the active
+ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
+ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
+ * Protocol Data Units (MVRPDUs), and frames from other link constrained
+ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
+ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
+ * forwarded by a Bridge. Permanently configured entries in the filtering
+ * database (FDB) ensure that such frames are discarded by the Forwarding
+ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
+ *
+ * Each of the reserved MAC addresses specified in Table 8-1
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
+ * permanently configured in the FDB in C-VLAN components and ERs.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-2
+ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
+ * configured in the FDB in S-VLAN components.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-3
+ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
+ * TPMR components.
+ *
+ * The FDB entries for reserved MAC addresses shall specify filtering for all
+ * Bridge Ports and all VIDs. Management shall not provide the capability to
+ * modify or remove entries for reserved MAC addresses.
+ *
+ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
+ * propagation of PDUs within a Bridged Network, as follows:
+ *
+ * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
+ * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
+ * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
+ * PDUs transmitted using this destination address, or any other addresses
+ * that appear in Table 8-1, Table 8-2, and Table 8-3
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
+ * therefore travel no further than those stations that can be reached via a
+ * single individual LAN from the originating station.
+ *
+ * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
+ * address that no conformant S-VLAN component, C-VLAN component, or MAC
+ * Bridge can forward; however, this address is relayed by a TPMR component.
+ * PDUs using this destination address, or any of the other addresses that
+ * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
+ * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
+ * any TPMRs but will propagate no further than the nearest S-VLAN component,
+ * C-VLAN component, or MAC Bridge.
+ *
+ * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
+ * that no conformant C-VLAN component, MAC Bridge can forward; however, it is
+ * relayed by TPMR components and S-VLAN components. PDUs using this
+ * destination address, or any of the other addresses that appear in Table 8-1
+ * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
+ * will be relayed by TPMR components and S-VLAN components but will propagate
+ * no further than the nearest C-VLAN component or MAC Bridge.
+ *
+ * Because the LLC Entity associated with each Bridge Port is provided via CPU
+ * port, we must not filter these frames but forward them to CPU port.
+ *
+ * In a Bridge, the transmission Port is majorly decided by ingress and egress
+ * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
+ * For link-local frames, only CPU port should be designated as destination port
+ * in the FDB, and the other functions of the Forwarding Process must not
+ * interfere with the decision of the transmission Port. We call this process
+ * trapping frames to CPU port.
+ *
+ * Therefore, on the switch with CPU port architecture, link-local frames must
+ * be trapped to CPU port, and certain link-local frames received by a Port of a
+ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
+ * from it.
+ *
+ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
+ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
+ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
+ * doesn't count) of this architecture will either function as a standard MAC
+ * Bridge or a standard VLAN Bridge.
+ *
+ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
+ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
+ * we don't need to relay PDUs using the destination addresses specified in the
+ * Nearest non-TPMR section, nor the portion of the Nearest Customer Bridge
+ * section where they must be relayed by TPMR components.
+ *
+ * One option to trap link-local frames to CPU port is to add static FDB entries
+ * with CPU port designated as destination port. However, because
+ * Independent VLAN Learning (IVL) is used on every VID, each entry only
+ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
+ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
+ * entries. This switch intellectual property can only hold a maximum of 2048
+ * entries. Using this option, there also isn't a mechanism to prevent
+ * link-local frames from being discarded when the spanning tree Port State of
+ * the reception Port is discarding.
+ *
+ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
+ * registers. Whilst this applies to every VID, it doesn't contain all of the
+ * reserved MAC addresses without affecting the remaining Standard Group MAC
+ * Addresses. The REV_UN frame tag, utilised via the RGAC4 register, covers the
+ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
+ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
+ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
+ * The latter option provides better but not complete conformance.
+ *
+ * This switch intellectual property also does not provide a mechanism to trap
+ * link-local frames with specific destination addresses to CPU port by Bridge,
+ * to conform to the filtering rules for the distinct Bridge components.
+ *
+ * Therefore, regardless of the type of the Bridge component, link-local frames
+ * with these destination addresses will be trapped to CPU port:
+ *
+ * 01-80-C2-00-00-[00,01,02,03,0E]
+ *
+ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
+ *
+ * In a Bridge comprising an S-VLAN component:
+ *
+ * Link-local frames with these destination addresses will be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-00
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
+ *
+ * To trap link-local frames to CPU port as conformant as this switch
+ * intellectual property can allow, link-local frames are made to be regarded as
+ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
+ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
+ * State function of the Forwarding Process.
+ *
+ * The only remaining interference is the ingress rules. When the reception Port
+ * has no PVID assigned in software, VLAN-untagged frames won't be allowed in.
+ * There doesn't seem to be a mechanism on the switch intellectual property to
+ * have link-local frames bypass this function of the Forwarding Process.
*/
static void
mt753x_trap_frames(struct mt7530_priv *priv)
@@ -971,35 +1124,43 @@ mt753x_trap_frames(struct mt7530_priv *priv)
/* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
* VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
- MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
- MT753X_BPDU_PORT_FW_MASK,
- MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_BPC,
+ MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
+ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
+ MT753X_BPDU_PORT_FW_MASK,
+ MT753X_PAE_BPDU_FR |
+ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
- MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
- MT753X_R01_PORT_FW_MASK,
- MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC1,
+ MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
+ MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
+ MT753X_R02_BPDU_FR |
+ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
- MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
- MT753X_R03_PORT_FW_MASK,
- MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC2,
+ MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
+ MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
+ MT753X_R0E_BPDU_FR |
+ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
}
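
Spelling out the capacity argument made in the comment above, i.e. why static FDB entries are not a viable trapping option here:

  16 reserved MAC addresses x 4096 VIDs = 65,536 static FDB entries needed
  FDB capacity of this switch IP        =  2,048 entries, a factor of 32 short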
static void
@@ -1722,14 +1883,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
static int mt753x_mirror_port_get(unsigned int id, u32 val)
{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
- MIRROR_PORT(val);
+ return (id == ID_MT7531 || id == ID_MT7988) ?
+ MT7531_MIRROR_PORT_GET(val) :
+ MIRROR_PORT(val);
}
static int mt753x_mirror_port_set(unsigned int id, u32 val)
{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
- MIRROR_PORT(val);
+ return (id == ID_MT7531 || id == ID_MT7988) ?
+ MT7531_MIRROR_PORT_SET(val) :
+ MIRROR_PORT(val);
}
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
@@ -2268,8 +2431,6 @@ mt7530_setup(struct dsa_switch *ds)
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
- mt7530_pll_setup(priv);
-
/* Lower Tx driving for TRGMII path */
for (i = 0; i < NUM_TRGMII_CTRL; i++)
mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
@@ -2285,6 +2446,9 @@ mt7530_setup(struct dsa_switch *ds)
val |= MHWTRAP_MANUAL;
mt7530_write(priv, MT7530_MHWTRAP, val);
+ if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
+ mt7530_pll_setup(priv);
+
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@@ -2318,6 +2482,9 @@ mt7530_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
@@ -2429,6 +2596,9 @@ mt7531_setup_common(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2504,18 +2674,25 @@ mt7531_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
- /* Enable PHY core PLL, since phy_device has not yet been created
- * provided for phy_[read,write]_mmd_indirect is called, we provide
- * our own mt7531_ind_mmd_phy_[read,write] to complete this
- * function.
+	/* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since
+	 * no phy_device has been created yet for phy_[read,write]_mmd_indirect
+	 * to operate on, we provide our own mt7531_ind_mmd_phy_[read,write]
+	 * helpers to complete this function.
*/
val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
- val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
CORE_PLL_GROUP4, val);
+ /* Disable EEE advertisement on the switch PHYs. */
+ for (i = MT753X_CTRL_PHY_ADDR;
+ i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
+ mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ 0);
+ }
+
mt7531_setup_common(ds);
/* Setup VLAN ID 0 for VLAN-unaware bridges */
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index d17b318e6ee4..a08053390b28 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -32,6 +32,10 @@ enum mt753x_id {
#define SYSC_REG_RSTCTRL 0x34
#define RESET_MCM BIT(2)
+/* Register for ARL global control */
+#define MT753X_AGC 0xc
+#define LOCAL_EN BIT(7)
+
/* Registers to mac forward control for unknown frames */
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
@@ -65,6 +69,7 @@ enum mt753x_id {
/* Registers for BPDU and PAE frame control */
#define MT753X_BPC 0x24
+#define MT753X_PAE_BPDU_FR BIT(25)
#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
@@ -75,20 +80,24 @@ enum mt753x_id {
/* Register for :01 and :02 MAC DA frame control */
#define MT753X_RGAC1 0x28
+#define MT753X_R02_BPDU_FR BIT(25)
#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
+#define MT753X_R01_BPDU_FR BIT(9)
#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c
+#define MT753X_R0E_BPDU_FR BIT(25)
#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
+#define MT753X_R03_BPDU_FR BIT(9)
#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
@@ -616,6 +625,7 @@ enum mt7531_clk_skew {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_RG_SYSPLL_DMY2 BIT(6)
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
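
The new *_BPDU_FR bits above sit alongside field macros built with GENMASK()/FIELD_PREP(). A sketch of how such a mask/value pair is composed for a read-modify-write, mirroring the mt753x_trap_frames() hunks (DEMO_* names are hypothetical):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_BPDU_FR		BIT(25)
#define DEMO_EG_TAG_MASK	GENMASK(24, 22)
#define DEMO_EG_TAG(x)		FIELD_PREP(DEMO_EG_TAG_MASK, x)
#define DEMO_PORT_FW_MASK	GENMASK(18, 16)
#define DEMO_PORT_FW(x)		FIELD_PREP(DEMO_PORT_FW_MASK, x)

/* Clear every field named in *mask, then set the BPDU-bypass bit plus
 * the desired egress-tag and port-forward field values via *val.
 */
static inline void demo_compose(u32 *mask, u32 *val, u32 eg_tag, u32 port_fw)
{
	*mask = DEMO_BPDU_FR | DEMO_EG_TAG_MASK | DEMO_PORT_FW_MASK;
	*val  = DEMO_BPDU_FR | DEMO_EG_TAG(eg_tag) | DEMO_PORT_FW(port_fw);
}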
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 9ed1821184ec..59b5dd0e2f41 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -566,13 +566,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
phy_interface_set_rgmii(supported);
}
-static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
- struct phylink_config *config)
+static void
+mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
+ int err;
+ u16 reg;
- /* Translate the default cmode */
- mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev, "p%d: failed to read port status\n", port);
+ return;
+ }
+
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
+ case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ break;
+
+ default:
+ dev_err(chip->dev,
+ "p%d: invalid port mode in status register: %04x\n",
+ port, reg);
+ }
+}
+
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ if (!mv88e6xxx_phy_is_internal(chip, port))
+ mv88e6250_setup_supported_interfaces(chip, port, config);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
@@ -5503,8 +5551,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6250,
.name = "Marvell 88E6020",
.num_databases = 64,
- .num_ports = 4,
+ /* Ports 2-4 are not routed to pins
+ * => usable ports 0, 1, 5, 6
+ */
+ .num_ports = 7,
.num_internal_phys = 2,
+ .invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
.max_vid = 4095,
.port_base_addr = 0x8,
.phy_base_addr = 0x0,
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 86deeb347cbc..ddadeb9bfdae 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -25,10 +25,25 @@
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
+/* - Modes with PHY suffix use output instead of input clock
+ * - Modes without RMII or RGMII use MII
+ * - Modes without speed do not have a fixed speed specified in the manual
+ * ("DC to x MHz" - variable clock support?)
+ */
+#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
+#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
+#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 833e55e4b961..52ddb4ef259e 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -94,7 +94,7 @@ int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
return tmp & 0xffff;
}
-int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int reg, int mmd,
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
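
The one-line sja1105 fix above restores the argument order of the mii_bus C45 accessors, which per linux/phy.h take (bus, phy address, MMD, register). A sketch making that order visible; the body is a stub, not the driver's transaction code:

#include <linux/phy.h>
#include <linux/types.h>

/* Matches: int (*write_c45)(struct mii_bus *bus, int addr, int devnum,
 *                           int regnum, u16 val);
 */
static int demo_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
			  u16 val)
{
	/* a real driver would issue the bus transaction here */
	return 0;
}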
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 9e9e4a03f1a8..2d8a66ea82fa 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -351,7 +351,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
- size = io_sq->bounce_buf_ctrl.buffer_size *
+ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
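
The (size_t) cast above widens the multiplication before it can wrap in 32-bit arithmetic. A self-contained userspace demonstration (assumes a 64-bit size_t; on 32-bit targets both results wrap):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buffer_size = 0x10000;	/* 64 KiB per buffer */
	uint32_t buffers_num = 0x10000;	/* 65536 buffers */

	/* 32-bit multiply wraps to 0 before the widening assignment */
	size_t wrong = buffer_size * buffers_num;
	/* cast widens one operand, so the multiply happens in 64 bits */
	size_t right = (size_t)buffer_size * buffers_num;

	printf("wrong=%zu right=%zu\n", wrong, right);
	return 0;
}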
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 09e7da1a69c9..be5acfa41ee0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -718,8 +718,11 @@ void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
bool print_once = true;
+ bool is_xdp_ring;
u32 i;
+ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
+
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
@@ -739,10 +742,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
ena_unmap_tx_buff(tx_ring, tx_info);
- dev_kfree_skb_any(tx_info->skb);
+ if (is_xdp_ring)
+ xdp_return_frame(tx_info->xdpf);
+ else
+ dev_kfree_skb_any(tx_info->skb);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->qid));
+
+ if (!is_xdp_ring)
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
@@ -3481,10 +3489,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- int i, budget, rc;
+ int qid, budget, rc;
int io_queue_count;
io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
+
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -3497,27 +3506,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
- budget = ENA_MONITORED_TX_QUEUES;
+ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
- tx_ring = &adapter->tx_ring[i];
- rx_ring = &adapter->rx_ring[i];
+ qid = adapter->last_monitored_tx_qid;
+
+ while (budget) {
+ qid = (qid + 1) % io_queue_count;
+
+ tx_ring = &adapter->tx_ring[qid];
+ rx_ring = &adapter->rx_ring[qid];
rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
if (unlikely(rc))
return;
- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
budget--;
- if (!budget)
- break;
}
- adapter->last_monitored_tx_qid = i % io_queue_count;
+ adapter->last_monitored_tx_qid = qid;
}
/* trigger napi schedule after 2 consecutive detections */
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
index 337c435d3ce9..5b175e7e92a1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_xdp.c
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -89,7 +89,7 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
if (unlikely(rc))
- return rc;
+ goto err;
ena_tx_ctx.req_id = req_id;
@@ -112,7 +112,9 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
error_unmap_dma:
ena_unmap_tx_buff(tx_ring, tx_info);
+err:
tx_info->xdpf = NULL;
+
return rc;
}
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 9662ee72814c..536635e57727 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -593,6 +593,16 @@ err_out:
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
+void pdsc_pci_reset_thread(struct work_struct *work)
+{
+ struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
+ struct pci_dev *pdev = pdsc->pdev;
+
+ pci_dev_get(pdev);
+ pci_reset_function(pdev);
+ pci_dev_put(pdev);
+}
+
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
u8 fw_status;
@@ -607,7 +617,8 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
if (fw_status != PDS_RC_BAD_PCI)
return;
- pci_reset_function(pdsc->pdev);
+ /* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
+ queue_work(pdsc->wq, &pdsc->pci_reset_work);
}
void pdsc_health_thread(struct work_struct *work)
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 92d7657dd614..a3e17a0c187a 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -197,6 +197,7 @@ struct pdsc {
struct pdsc_qcq notifyqcq;
u64 last_eid;
struct pdsc_viftype *viftype_status;
+ struct work_struct pci_reset_work;
};
/** enum pds_core_dbell_bits - bitwise composition of dbell values.
@@ -313,5 +314,6 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
void pdsc_fw_down(struct pdsc *pdsc);
void pdsc_fw_up(struct pdsc *pdsc);
+void pdsc_pci_reset_thread(struct work_struct *work);
#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index e494e1298dc9..495ef4ef8c10 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -229,6 +229,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc)
.reset.opcode = PDS_CORE_CMD_RESET,
};
+ if (!pdsc_is_fw_running(pdsc))
+ return 0;
+
return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
}
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index ab6133e7db42..660268ff9562 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -239,6 +239,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
pdsc->wq = create_singlethread_workqueue(wq_name);
INIT_WORK(&pdsc->health_work, pdsc_health_thread);
+ INIT_WORK(&pdsc->pci_reset_work, pdsc_pci_reset_thread);
timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
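
The pds_core hunks defer pci_reset_function() to the driver's own workqueue so the health thread never calls it directly, avoiding the deadlock noted in the comment. A sketch of the deferral, with demo_* names and INIT_WORK(&d->pci_reset_work, demo_pci_reset_thread) assumed done at init time:

#include <linux/pci.h>
#include <linux/workqueue.h>

struct demo_core {
	struct pci_dev *pdev;
	struct workqueue_struct *wq;
	struct work_struct pci_reset_work;
};

/* Runs in workqueue context, outside the health thread's locks. */
static void demo_pci_reset_thread(struct work_struct *work)
{
	struct demo_core *d = container_of(work, struct demo_core,
					   pci_reset_work);

	pci_dev_get(d->pdev);
	pci_reset_function(d->pdev);
	pci_dev_put(d->pdev);
}

/* Called from the health check: defer rather than reset in place. */
static void demo_request_reset(struct demo_core *d)
{
	queue_work(d->wq, &d->pci_reset_work);
}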
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index dd06b68b33ed..82768b0e9026 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -392,7 +392,9 @@ static void umac_reset(struct bcmasp_intf *intf)
umac_wl(intf, 0x0, UMC_CMD);
umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
usleep_range(10, 100);
- umac_wl(intf, 0x0, UMC_CMD);
+	/* Hold the UniMAC in reset; it is brought out of
+	 * reset once the PHY link comes up.
+	 */
}
static void umac_set_hw_addr(struct bcmasp_intf *intf,
@@ -412,6 +414,8 @@ static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
u32 reg;
reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ return;
if (enable)
reg |= mask;
else
@@ -430,13 +434,10 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_FRM_LEN);
umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
- umac_enable_set(intf, UMC_CMD_PROMISC, 1);
}
-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
- struct bcmasp_intf *intf =
- container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0;
@@ -479,10 +480,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT);
}
- /* Ensure all descriptors have been written to DRAM for the hardware
- * to see updated contents.
- */
- wmb();
+ return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ int released = 0;
+
+ released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi);
@@ -658,6 +665,12 @@ static void bcmasp_adj_link(struct net_device *dev)
UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
UMC_CMD_TX_PAUSE_IGNORE);
reg |= cmd_bits;
+ if (reg & UMC_CMD_SW_RESET) {
+ reg &= ~UMC_CMD_SW_RESET;
+ umac_wl(intf, reg, UMC_CMD);
+ udelay(2);
+ reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ }
umac_wl(intf, reg, UMC_CMD);
active = phy_init_eee(phydev, 0) >= 0;
@@ -788,6 +801,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
+ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
@@ -876,6 +890,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+ bcmasp_tx_reclaim(intf);
+
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev);
@@ -1035,19 +1051,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
- } else if (!intf->wolopts) {
- ret = phy_resume(dev->phydev);
- if (ret)
- goto err_phy_disable;
}
umac_reset(intf);
umac_init(intf);
- /* Disable the UniMAC RX/TX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
-
umac_set_hw_addr(intf, dev->dev_addr);
intf->old_duplex = -1;
@@ -1062,9 +1071,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
bcmasp_enable_rx(intf, 1);
- /* Turn on UniMAC TX/RX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
-
intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
bcmasp_netif_start(dev);
@@ -1306,7 +1312,14 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
if (intf->wolopts & WAKE_FILTER)
bcmasp_netfilt_suspend(intf);
- /* UniMAC receive needs to be turned on */
+ /* Bring UniMAC out of reset if needed and enable RX */
+ reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ reg &= ~UMC_CMD_SW_RESET;
+
+ reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ umac_wl(intf, reg, UMC_CMD);
+
umac_enable_set(intf, UMC_CMD_RX_EN, 1);
if (intf->parent->wol_irq > 0) {
@@ -1324,7 +1337,6 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
struct device *kdev = &intf->parent->pdev->dev;
struct net_device *dev = intf->ndev;
- int ret = 0;
if (!netif_running(dev))
return 0;
@@ -1334,10 +1346,6 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
bcmasp_netif_deinit(dev);
if (!intf->wolopts) {
- ret = phy_suspend(dev->phydev);
- if (ret)
- goto out;
-
if (intf->internal_phy)
bcmasp_ephy_enable_set(intf, false);
else
@@ -1354,11 +1362,7 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
clk_disable_unprepare(intf->parent->clk);
- return ret;
-
-out:
- bcmasp_netif_init(dev, false);
- return ret;
+ return 0;
}
static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3e4fb3c3e834..1be6d14030bc 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
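
The netif_running() guard above keeps pause reconfiguration from resetting rings that were never set up. Reduced to a sketch (the two branches stand in for b44's halt/re-init versus register-only paths):

#include <linux/netdevice.h>
#include <linux/types.h>

static void demo_apply_pause(struct net_device *dev, bool full_reset)
{
	if (!netif_running(dev))
		return;		/* ndo_open will apply the saved flags */

	if (full_reset) {
		/* halt, re-init rings, full hardware reset */
	} else {
		/* lightweight flow-control register update only */
	}
}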
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 493b724848c8..2c2ee79c4d77 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1778,7 +1778,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
} else {
@@ -1788,7 +1788,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
@@ -1804,7 +1804,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1815,7 +1815,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
}
@@ -2094,11 +2094,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
- if (!frag_len) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!frag_len)
+ goto oom_next_rx;
}
xdp_active = true;
}
@@ -2121,9 +2118,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -2134,29 +2129,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
} else {
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
}
}
@@ -2234,6 +2221,11 @@ next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2280,7 +2272,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
return rc;
}
@@ -9089,7 +9081,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5_PLUS(bp))
return;
status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -11758,6 +11750,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ if (bp->ptp_cfg)
+ atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
bnxt_cfg_usr_fltrs(bp);
@@ -13035,6 +13029,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
+}
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
@@ -13048,12 +13052,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -15371,6 +15370,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
@@ -15379,16 +15379,27 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- if (state == pci_channel_io_perm_failure) {
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
+
+ if (abort || state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+ /* Link is not reliable anymore if state is pci_channel_io_frozen
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
@@ -15472,6 +15483,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
rtnl_unlock();
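Two themes run through the bnxt hunks above: every receive out-of-memory path now funnels through a single oom_next_rx label, and the counter is bumped via cpr->bnapi->cp_ring so the increment always lands on the ring that owns sw_stats even when completion rings are combined. A condensed, hypothetical sketch of the goto consolidation:

/* Sketch: one label, one counter increment, instead of repeating the
 * accounting at every failure site. Names are illustrative only.
 */
static int my_rx_pkt(struct my_cp_ring *cpr)
{
        struct sk_buff *skb;

        skb = my_build_skb(cpr);
        if (!skb)
                goto oom_next_rx;
        if (my_attach_frags(cpr, skb))
                goto oom_next_rx;

        return my_deliver(cpr, skb);

oom_next_rx:
        cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
        return -ENOMEM;
}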
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 93f9bd55020f..195c02dc0683 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -210,6 +210,9 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
+ if (edev->ulp_tbl->msix_requested)
+ bnxt_fill_msix_vecs(bp, edev->msix_entries);
+
if (aux_priv) {
struct auxiliary_device *adev;
@@ -392,12 +395,13 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
if (!edev)
goto aux_dev_uninit;
+ aux_priv->edev = edev;
+
ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
if (!ulp)
goto aux_dev_uninit;
edev->ulp_tbl = ulp;
- aux_priv->edev = edev;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 7396e2823e32..b1f84b37032a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3280,7 +3280,7 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
}
/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
{
unsigned int i;
u32 reg;
@@ -3305,6 +3305,14 @@ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
udelay(10);
bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
return dma_ctrl;
}
@@ -3368,8 +3376,8 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
+ /* Disable RX/TX DMA and flush TX and RX queues */
+ dma_ctrl = bcmgenet_dma_disable(priv, true);
/* Reinitialize TDMA and RDMA and SW housekeeping */
ret = bcmgenet_init_dma(priv);
@@ -4235,7 +4243,7 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
/* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
+ dma_ctrl = bcmgenet_dma_disable(priv, false);
/* Reinitialize TDMA and RDMA and SW housekeeping */
ret = bcmgenet_init_dma(priv);
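bcmgenet_dma_disable() now takes a flush_rx flag: open() requests the RX FIFO flush (stale packets may remain from before the reset) while resume() skips it. The flush itself is a read-modify-pulse of bit 0 in the rbuf control register. A small sketch of that pulse, assuming the 10 us settle time used above and placeholder accessor names:

/* Sketch: pulse a flush bit, then restore the original register value. */
static void my_flush_rx_fifo(struct my_priv *priv)
{
        u32 reg = my_rbuf_ctrl_get(priv);

        my_rbuf_ctrl_set(priv, reg | BIT(0));   /* assert the flush */
        udelay(10);                             /* let the FIFO drain */
        my_rbuf_ctrl_set(priv, reg);            /* deassert / restore */
        udelay(10);
}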
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d7693fdf640d..8bd213da8fb6 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2454,8 +2454,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- phy_dev->mac_managed_pm = true;
-
phy_attached_info(phy_dev);
return 0;
@@ -2467,10 +2465,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
bool suppress_preamble = false;
+ struct phy_device *phydev;
struct device_node *node;
int err = -ENXIO;
u32 mii_speed, holdtime;
u32 bus_freq;
+ int addr;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2584,6 +2584,13 @@ static int fec_enet_mii_init(struct platform_device *pdev)
goto err_out_free_mdiobus;
of_node_put(node);
+ /* find all the PHY devices on the bus and set mac_managed_pm to true */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phydev = mdiobus_get_phy(fep->mii_bus, addr);
+ if (phydev)
+ phydev->mac_managed_pm = true;
+ }
+
mii_cnt++;
/* save fec0 mii_bus */
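Rather than flagging only the PHY attached in fec_enet_mii_probe(), the fec change sweeps every address on the freshly registered MDIO bus and marks each discovered PHY as MAC-managed for power management, so PHYs probed later are covered too. The same sweep in isolation, as a sketch:

/* Sketch: mark all PHYs on an MDIO bus as MAC-managed for PM. */
#include <linux/phy.h>

static void my_mark_phys_mac_managed(struct mii_bus *bus)
{
        struct phy_device *phydev;
        int addr;

        for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
                phydev = mdiobus_get_phy(bus, addr);
                if (phydev)
                        phydev->mac_managed_pm = true;
        }
}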
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index f3c9395d8351..618f66d9586b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -85,7 +85,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
true);
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ desc.data[0] = cpu_to_le32(tqp->index);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 999a0ee162a6..941cb529d671 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -78,6 +78,9 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
+#define HNS3_NIC_LB_TEST_UNEXECUTED 4
+
+static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
@@ -418,18 +421,26 @@ static void hns3_do_external_lb(struct net_device *ndev,
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
+ int cnt = hns3_get_sset_count(ndev, ETH_SS_TEST);
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
+ int i;
+
+ /* Initialize the loopback test results to avoid marking an unexecuted
+ * loopback test as PASS.

+ */
+ for (i = 0; i < cnt; i++)
+ data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
netdev_err(ndev, "dev resetting!");
- return;
+ goto failure;
}
if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
- return;
+ goto failure;
if (netif_msg_ifdown(h))
netdev_info(ndev, "self test start\n");
@@ -451,6 +462,10 @@ static void hns3_self_test(struct net_device *ndev,
if (netif_msg_ifdown(h))
netdev_info(ndev, "self test end\n");
+ return;
+
+failure:
+ eth_test->flags |= ETH_TEST_FL_FAILED;
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
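hns3_self_test() now seeds every result slot with an "unexecuted" sentinel and raises ETH_TEST_FL_FAILED on any early exit, so ethtool can never report a skipped test as PASS (0). A sketch of that fail-safe shape, with illustrative names:

/* Sketch: seed all result slots, and never let an early exit look like
 * a pass. MY_LB_TEST_UNEXECUTED mirrors the sentinel added above.
 */
#define MY_LB_TEST_UNEXECUTED   4

static void my_self_test(struct net_device *ndev,
                         struct ethtool_test *et, u64 *data)
{
        int cnt = my_get_sset_count(ndev, ETH_SS_TEST);
        int i;

        for (i = 0; i < cnt; i++)
                data[i] = MY_LB_TEST_UNEXECUTED;        /* nothing ran yet */

        if (my_device_resetting(ndev)) {
                et->flags |= ETH_TEST_FL_FAILED;        /* skipped != passed */
                return;
        }

        /* ... run the loopback tests, overwriting data[i] as they finish ... */
}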
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index b4afb66efe5c..ff6a2ed23ddb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -11626,6 +11626,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_pci_uninit;
+ devl_lock(hdev->devlink);
+
/* Firmware command queue initialize */
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
@@ -11805,6 +11807,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ devl_unlock(hdev->devlink);
return 0;
err_mdiobus_unreg:
@@ -11817,6 +11820,7 @@ err_msi_uninit:
err_cmd_uninit:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
+ devl_unlock(hdev->devlink);
hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1fef6bb5a5fb..4b6e7536170a 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -628,6 +628,7 @@ struct e1000_phy_info {
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
+ u32 retry_count;
enum e1000_media_type media_type;
@@ -644,6 +645,7 @@ struct e1000_phy_info {
bool polarity_correction;
bool speed_downgraded;
bool autoneg_wait_to_complete;
+ bool retry_enabled;
};
struct e1000_nvm_info {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 19e450a5bd31..f9e94be36e97 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -222,11 +222,18 @@ out:
if (hw->mac.type >= e1000_pch_lpt) {
/* Only unforce SMBus if ME is not active */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
@@ -310,6 +317,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
goto out;
}
+ /* There is no guarantee that the PHY is accessible at this time
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
* inaccessible and resetting the PHY is not blocked, toggle the
* LANPHYPC Value bit to force the interconnect to PCIe mode.
@@ -380,6 +392,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
break;
}
+ e1000e_enable_phy_retry(hw);
+
hw->phy.ops.release(hw);
if (!ret_val) {
@@ -449,6 +463,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->id = e1000_phy_unknown;
+ if (hw->mac.type == e1000_pch_mtp) {
+ phy->retry_count = 2;
+ e1000e_enable_phy_retry(hw);
+ }
+
ret_val = e1000_init_phy_workarounds_pchlan(hw);
if (ret_val)
return ret_val;
@@ -1146,18 +1165,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
- /* Force SMBus mode in PHY */
- ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
- if (ret_val)
- goto release;
- phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
- e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
-
- /* Force SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
-
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
@@ -1313,6 +1320,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
/* Toggle LANPHYPC Value bit */
e1000_toggle_lanphypc_pch_lpt(hw);
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
if (ret_val) {
@@ -1333,6 +1345,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
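The pattern repeated through these ich8lan hunks: any PHY access the driver already expects to fault (the PHY may be unreachable, and switching the PHY interface always reports an MDI error) is bracketed by e1000e_disable_phy_retry()/e1000e_enable_phy_retry(), so the new retry loop in phy.c does not add 10 ms delays to an anticipated failure. Condensed from the unforce-SMBus hunk above, as a sketch:

/* Sketch: suppress retries around an access known to report MDI errors. */
static void my_unforce_smbus(struct e1000_hw *hw)
{
        u16 phy_reg;

        e1000e_disable_phy_retry(hw);           /* expected to fault */

        e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
        e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);

        e1000e_enable_phy_retry(hw);            /* back to normal retries */
}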
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index cc8c531ec3df..3692fce20195 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6623,6 +6623,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ u16 smb_ctrl;
/* Runtime suspend should only enable wakeup for link changes */
if (runtime)
@@ -6696,6 +6697,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (retval)
return retval;
}
+
+ /* Force SMBUS to allow WOL */
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
+ e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
+ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
+
+ e1000e_enable_phy_retry(hw);
+
+ /* Force SMBus mode in MAC */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, ctrl_ext);
}
/* Ensure that the appropriate bits are set in LPI_CTRL
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 5e329156d1ba..93544f1cc2a5 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -107,6 +107,16 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
}
+void e1000e_disable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = false;
+}
+
+void e1000e_enable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = true;
+}
+
/**
* e1000e_read_phy_reg_mdic - Read MDI control register
* @hw: pointer to the HW structure
@@ -118,55 +128,73 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
**/
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = ((offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
- e_dbg("MDI Read offset error - requested %d, returned %d\n",
- offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
- return -E1000_ERR_PHY;
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usleep_range(50, 60);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Read offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
+
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usleep_range(100, 150);
+
+ if (success) {
+ *data = (u16)mdic;
+ return 0;
+ }
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
}
- *data = (u16)mdic;
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
- return 0;
+ return -E1000_ERR_PHY;
}
/**
@@ -179,56 +207,72 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
**/
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = (((u32)data) |
- (offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Write PHY Red Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
- e_dbg("MDI Write offset error - requested %d, returned %d\n",
- offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
- return -E1000_ERR_PHY;
- }
+ ew32(MDIC, mdic);
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
+ /* Poll the ready bit to see if the MDI write completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usleep_range(50, 60);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Write PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Write offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
- return 0;
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usleep_range(100, 150);
+
+ if (success)
+ return 0;
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
+ }
+
+ return -E1000_ERR_PHY;
}
/**
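Both MDIC helpers now share the same shape: a bounded loop that runs the full transaction each pass, records any failure in a local success flag, sleeps 10 ms between attempts, and returns -E1000_ERR_PHY only after the last attempt fails. Since retry_max is zero unless retries were explicitly enabled, the common case remains a single pass. The loop skeleton, stripped of the register details (my_issue_and_poll is a placeholder for the MDIC program-and-poll sequence):

/* Sketch of the bounded-retry skeleton used by both MDIC helpers. */
static s32 my_mdic_xact(struct e1000_hw *hw, u32 cmd)
{
        u32 retry_max = hw->phy.retry_enabled ? hw->phy.retry_count : 0;
        u32 attempt;

        for (attempt = 0; attempt <= retry_max; attempt++) {
                if (my_issue_and_poll(hw, cmd) == 0)
                        return 0;               /* done, no retry needed */

                if (attempt != retry_max)
                        mdelay(10);             /* settle before retrying */
        }

        return -E1000_ERR_PHY;                  /* every attempt failed */
}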
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index c48777d09523..049bb325b4b1 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -51,6 +51,8 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
void e1000_power_up_phy_copper(struct e1000_hw *hw);
void e1000_power_down_phy_copper(struct e1000_hw *hw);
+void e1000e_disable_phy_retry(struct e1000_hw *hw);
+void e1000e_enable_phy_retry(struct e1000_hw *hw);
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ba24f3fa92c3..2fbabcdb5bb5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -955,6 +955,7 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
+ bool in_busy_poll;
int irq_num; /* IRQ assigned to this q_vector */
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f86578857e8a..ffb9f9f15c52 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1253,8 +1253,11 @@ int i40e_count_filters(struct i40e_vsi *vsi)
int bkt;
int cnt = 0;
- hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
- ++cnt;
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_ACTIVE)
+ ++cnt;
+ }
return cnt;
}
@@ -3911,6 +3914,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
+ /* Set ITR for software interrupts triggered after exiting
+ * busy-loop polling.
+ */
+ wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1),
+ I40E_ITR_20K);
+
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
@@ -16098,8 +16107,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
rd32(&pf->hw, I40E_PRTGL_SAH));
if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- pf->hw.port, val);
+ dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
+ pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16641,7 +16650,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 14ab642cafdb..432afbb64201 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -333,8 +333,11 @@
#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0d7177083708..1a12b732818e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2630,7 +2630,22 @@ process_next:
return failure ? budget : (int)total_rx_packets;
}
-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+/**
+ * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
+ * @itr_idx: interrupt throttling index
+ * @interval: interrupt throttling interval value in usecs
+ * @force_swint: force software interrupt
+ *
+ * The function builds a value for the I40E_PFINT_DYN_CTLN register that
+ * updates the interrupt throttling interval for the specified ITR index
+ * and optionally enforces a software interrupt. If @itr_idx is
+ * I40E_ITR_NONE, no interval change is applied and only the @force_swint
+ * parameter is taken into account. If neither an interval change nor a
+ * software interrupt is requested, the built value simply re-enables the
+ * appropriate vector interrupt.
+ **/
+static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
+ bool force_swint)
{
u32 val;
@@ -2644,23 +2659,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
* an event in the PBA anyway so we need to rely on the automask
* to hold pending events for us until the interrupt is re-enabled
*
- * The itr value is reported in microseconds, and the register
- * value is recorded in 2 microsecond units. For this reason we
- * only need to shift by the interval shift - 1 instead of the
- * full value.
+ * We have to shift the given value as it is reported in microseconds
+ * and the register value is recorded in 2 microsecond units.
*/
- itr &= I40E_ITR_MASK;
+ interval >>= 1;
+ /* 1. Enable vector interrupt
+ * 2. Update the interval for the specified ITR index
+ * (I40E_ITR_NONE in the register is used to indicate that
+ * no interval update is requested)
+ */
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
+
+ /* 3. Enforce software interrupt trigger if requested
+ * (The rate of these software interrupts is limited by ITR2,
+ * which is set to 20K interrupts per second)
+ */
+ if (force_swint)
+ val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
+ I40E_SW_ITR);
return val;
}
-/* a small macro to shorten up some long lines */
-#define INTREG I40E_PFINT_DYN_CTLN
-
/* The act of updating the ITR will cause it to immediately trigger. In order
* to prevent this from throwing off adaptive update statistics we defer the
* update so that it can only happen so often. So after either Tx or Rx are
@@ -2679,8 +2704,10 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{
+ enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
struct i40e_hw *hw = &vsi->back->hw;
- u32 intval;
+ u16 interval = 0;
+ u32 itr_val;
/* If we don't have MSIX, then we only need to re-enable icr0 */
if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
@@ -2702,8 +2729,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
@@ -2712,25 +2739,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- intval = i40e_buildreg_itr(I40E_TX_ITR,
- q_vector->tx.target_itr);
+ itr_idx = I40E_TX_ITR;
+ interval = q_vector->tx.target_itr;
q_vector->tx.current_itr = q_vector->tx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
/* Rx ITR needs to be increased, third priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* No ITR update, lowest priority */
- intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
}
- if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(q_vector->reg_idx), intval);
+ /* Do not update interrupt control register if VSI is down */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ /* Update ITR interval if necessary and enforce software interrupt
+ * if we are exiting busy poll.
+ */
+ if (q_vector->in_busy_poll) {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, true);
+ q_vector->in_busy_poll = false;
+ } else {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, false);
+ }
+ wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
}
/**
@@ -2845,6 +2883,8 @@ tx_only:
*/
if (likely(napi_complete_done(napi, work_done)))
i40e_update_enable_itr(vsi, q_vector);
+ else
+ q_vector->in_busy_poll = true;
return min(work_done, budget - 1);
}
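The i40e busy-poll fix has two halves: the poll routine latches in_busy_poll whenever napi_complete_done() reports that busy polling still owns the vector, and the next interrupt-enable write consumes the flag by requesting a software interrupt (rate-limited by the ITR2 value programmed to 20K above). A sketch of the latch-and-consume flow, with illustrative names:

/* Sketch: remember that we exited via busy poll, then force a SWINT
 * the next time the vector interrupt is re-enabled.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
        struct my_q_vector *q = container_of(napi, struct my_q_vector, napi);
        int work_done = my_clean_rings(q, budget);

        if (work_done == budget)
                return budget;                  /* keep polling */

        if (napi_complete_done(napi, work_done))
                my_enable_irq(q);               /* forces SWINT if flag set */
        else
                q->in_busy_poll = true;         /* busy poll owns the vector */

        return work_done;
}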
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index abf15067eb5d..2cdc7de6301c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -68,6 +68,7 @@ enum i40e_dyn_idx {
/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 83a34e98bdc7..232b65b9c8ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1624,8 +1624,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
- int i, v;
u32 reg;
+ int i;
/* If we don't have any VFs, then there is nothing to reset */
if (!pf->num_alloc_vfs)
@@ -1636,11 +1636,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- vf = &pf->vf[v];
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is being reset no need to trigger reset again */
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ i40e_trigger_vf_reset(vf, flr);
}
/* HW requires some time to make sure it can flush the FIFO for a VF
@@ -1649,14 +1648,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
* the VFs using a simple iterator that increments once that VF has
* finished resetting.
*/
- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
usleep_range(10000, 20000);
/* Check each VF in sequence, beginning with the VF to fail
* the previous check.
*/
- while (v < pf->num_alloc_vfs) {
- vf = &pf->vf[v];
+ while (vf < &pf->vf[pf->num_alloc_vfs]) {
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
@@ -1666,7 +1664,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
*/
- v++;
+ ++vf;
}
}
@@ -1676,39 +1674,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* Display a warning if at least one VF didn't manage to reset in
* time, but continue on with the operation.
*/
- if (v < pf->num_alloc_vfs)
+ if (vf < &pf->vf[pf->num_alloc_vfs])
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
- pf->vf[v].vf_id);
+ vf->vf_id);
usleep_range(10000, 20000);
/* Begin disabling all the rings associated with VFs, but do not wait
* between each VF.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
}
/* Now that we've notified HW to disable all of the VF rings, wait
* until they finish.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
}
/* Hw may need up to 50ms to finish disabling the RX queues. We
@@ -1717,12 +1715,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_cleanup_reset_vf(&pf->vf[v]);
+ i40e_cleanup_reset_vf(vf);
}
i40e_flush(hw);
@@ -3139,11 +3137,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
/* Allow to delete VF primary MAC only if it was not set
* administratively by PF or if VF is trusted.
*/
- if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
- i40e_can_vf_change_mac(vf))
- was_unimac_deleted = true;
- else
- continue;
+ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ if (i40e_can_vf_change_mac(vf))
+ was_unimac_deleted = true;
+ else
+ continue;
+ }
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = -EINVAL;
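The reset loops above previously mixed an index v with a pointer vf, and some of the skip checks tested vf while the loop advanced v; iterating by pointer makes the tested object and the advanced object the same thing. The idiom in isolation, wrapped in a hypothetical helper:

/* Sketch: pointer iteration over the VF array; one variable is both
 * advanced and tested, removing the index/pointer aliasing.
 */
static void my_finish_resets(struct i40e_pf *pf)
{
        struct i40e_vf *vf;

        for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
                if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
                        continue;
                i40e_cleanup_reset_vf(vf);
        }
}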
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index ef2440f3abf8..166832a4213a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3503,6 +3503,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ *
+ * Return: true if the configurations match, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+ struct tc_mqprio_qopt *mqprio_qopt)
+{
+ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+ int i;
+
+ if (adapter->num_tc != mqprio_qopt->num_tc)
+ return false;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ if (ch[i].count != mqprio_qopt->count[i] ||
+ ch[i].offset != mqprio_qopt->offset[i])
+ return false;
+ }
+ return true;
+}
+
+/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type_data: tc offload data
@@ -3559,7 +3587,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
- if (adapter->num_tc == num_tc)
+ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 8040317c9561..1f3e7a6903e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -593,8 +593,9 @@ struct ice_aqc_recipe_data_elem {
struct ice_aqc_recipe_to_profile {
__le16 profile_id;
u8 rsvd[6];
- DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
+ __le64 recipe_assoc;
};
+static_assert(sizeof(struct ice_aqc_recipe_to_profile) == 16);
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index db4b2844e1f7..d9f6cc71d900 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1002,8 +1002,8 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
*/
int ice_init_hw(struct ice_hw *hw)
{
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
- void *mac_buf __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
+ void *mac_buf __free(kfree) = NULL;
u16 mac_buf_len;
int status;
@@ -3272,7 +3272,7 @@ int ice_update_link_info(struct ice_port_info *pi)
return status;
if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
@@ -3420,7 +3420,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_hw *hw;
int status;
@@ -3561,7 +3561,7 @@ int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
struct ice_hw *hw;
int status;
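The __free(kfree) changes in ice_common.c are all the same fix: a pointer declared with a scope-based cleanup attribute has kfree() run on every exit from the scope, so it must be initialized at declaration; a return taken before the first assignment would otherwise hand an uninitialized pointer to kfree(). A minimal sketch of the rule, with placeholder types:

/* Sketch: cleanup-attribute pointers must start life as NULL. */
static int my_query_caps(struct my_hw *hw)
{
        struct my_caps *caps __free(kfree) = NULL;      /* freed on any return */

        if (!my_hw_ready(hw))
                return -EBUSY;          /* kfree(NULL) is a harmless no-op */

        caps = kzalloc(sizeof(*caps), GFP_KERNEL);
        if (!caps)
                return -ENOMEM;

        return my_fill_caps(hw, caps);  /* no explicit kfree on success */
}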
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 255a9c8151b4..78b833b3e1d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -941,11 +941,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
struct ice_pf *pf = orig_vsi->back;
+ u8 *tx_frame __free(kfree) = NULL;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- u8 *tx_frame __free(kfree);
int i;
netdev_info(netdev, "loopback test\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index f97128b69f87..f0e76f0a6d60 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -2041,7 +2041,7 @@ int ice_init_lag(struct ice_pf *pf)
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ &recipe_bits, NULL);
if (err)
continue;
@@ -2049,7 +2049,7 @@ int ice_init_lag(struct ice_pf *pf)
recipe_bits |= BIT(lag->pf_recipe) |
BIT(lag->lport_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ recipe_bits, NULL);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index ee3f0d3e3f6d..558422120312 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3091,7 +3091,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
- int prev_num_q_vectors = 0;
+ int prev_num_q_vectors;
struct ice_pf *pf;
int ret;
@@ -3105,13 +3105,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- coalesce = kcalloc(vsi->num_q_vectors,
- sizeof(struct ice_coalesce_stored), GFP_KERNEL);
- if (!coalesce)
- return -ENOMEM;
-
- prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
-
ret = ice_vsi_realloc_stat_arrays(vsi);
if (ret)
goto err_vsi_cfg;
@@ -3121,6 +3114,13 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (ret)
goto err_vsi_cfg;
+ coalesce = kcalloc(vsi->num_q_vectors,
+ sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+ if (!coalesce)
+ return -ENOMEM;
+
+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+
ret = ice_vsi_cfg_tc_lan(pf, vsi);
if (ret) {
if (vsi_flags & ICE_VSI_FLAG_INIT) {
@@ -3139,8 +3139,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
err_vsi_cfg_tc_lan:
ice_vsi_decfg(vsi);
-err_vsi_cfg:
kfree(coalesce);
+err_vsi_cfg:
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index d4baae8c3b72..b4ea935e8300 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2025,12 +2025,12 @@ error_out:
* ice_aq_map_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: Recipe association bitmap to program for the profile
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2042,7 +2042,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
/* Set the recipe ID bit in the bitmask to let the device know which
* profile we are associating the recipe to
*/
- memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
+ cmd->recipe_assoc = cpu_to_le64(r_assoc);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -2051,12 +2051,12 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* ice_aq_get_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: Recipe association bitmap filled in and returned in the response
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2069,7 +2069,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (!status)
- memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
+ *r_assoc = le64_to_cpu(cmd->recipe_assoc);
return status;
}
@@ -2108,6 +2108,7 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 i;
for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
@@ -2115,8 +2116,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
- if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
+ if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL))
continue;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_copy(profile_to_recipe[i], r_bitmap,
ICE_MAX_NUM_RECIPES);
for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
@@ -5390,22 +5392,24 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
list_for_each_entry(fvit, &rm->fv_list, list_entry) {
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 j;
status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap, NULL);
+ &recp_assoc, NULL);
if (status)
goto err_unroll;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
ICE_MAX_NUM_RECIPES);
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
goto err_unroll;
+ bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap,
- NULL);
+ recp_assoc, NULL);
ice_release_change_lock(hw);
if (status)
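With recipe_assoc now a __le64 on the wire, the driver keeps DECLARE_BITMAP() purely internal and converts at the boundary: bitmap_from_arr64() after reading, bitmap_to_arr64() before writing, with cpu_to_le64()/le64_to_cpu() handling endianness inside the AQ helpers. The round trip as a sketch, using a hypothetical helper:

/* Sketch: edit a 64-bit wire field through a kernel bitmap. */
#include <linux/bitmap.h>

static u64 my_add_recipe(u64 assoc, unsigned int rid)
{
        DECLARE_BITMAP(bits, 64);

        bitmap_from_arr64(bits, &assoc, 64);    /* wire value -> bitmap */
        set_bit(rid, bits);                     /* manipulate as a bitmap */
        bitmap_to_arr64(&assoc, bits, 64);      /* bitmap -> wire value */

        return assoc;
}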
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index db7e501b7e0a..89ffa1b51b5a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -424,10 +424,10 @@ int ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd);
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd);
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index b890410a2bc0..688ccb0615ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
* - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
* - Tunnel flag (present if tunnel)
*/
+ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+ lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* Always add direction metadata */
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ ice_rule_add_src_vsi_metadata(&list[i]);
+ i++;
+ }
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
@@ -772,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
int ret;
int i;
- if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}
@@ -820,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
+ rule_info.src_vsi = vsi->idx;
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
@@ -1481,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 21d26e19338a..d10a4be965b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -856,6 +856,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
if (lag && lag->bonded && lag->primary) {
@@ -867,11 +872,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
act_prt = ICE_LAG_INVALID_PORT;
}
- if (flags & ICE_VF_RESET_LOCK)
- mutex_lock(&vf->cfg_lock);
- else
- lockdep_assert_held(&vf->cfg_lock);
-
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
@@ -956,14 +956,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_mbx_clear_malvf(&vf->mbx_info);
out_unlock:
- if (flags & ICE_VF_RESET_LOCK)
- mutex_unlock(&vf->cfg_lock);
-
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
mutex_unlock(&pf->lag_mutex);
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index 80dc4bcdd3a4..b3e1bdcb80f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -26,24 +26,22 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
struct ice_vsi_vlan_ops *vlan_ops;
struct ice_pf *pf = vsi->back;
- if (ice_is_dvm_ena(&pf->hw)) {
- vlan_ops = &vsi->outer_vlan_ops;
-
- /* setup outer VLAN ops */
- vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
+ if (ice_is_dvm_ena(&pf->hw)) {
vlan_ops->add_vlan = noop_vlan_arg;
vlan_ops->del_vlan = noop_vlan_arg;
vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- } else {
- vlan_ops = &vsi->inner_vlan_ops;
+ /* setup outer VLAN ops */
+ vlan_ops = &vsi->outer_vlan_ops;
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ } else {
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 6dd7a66bb897..f5bc4a278074 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2941,6 +2941,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
+ skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
+
decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
/* If we don't know the ptype we can't do anything else with it. Just
* pass it up the stack as-is.
@@ -2951,8 +2953,6 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, &decoded);
- skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
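eth_type_trans() both resolves skb->protocol and pulls the Ethernet header, so the idpf fix moves the call ahead of the unknown-ptype early return; packets taking that path previously reached the stack with skb->protocol unset. The ordering rule as a sketch, with placeholder names:

/* Sketch: resolve the protocol before any early-return path. */
static int my_rx_process(struct my_rxq *rxq, struct sk_buff *skb, u16 ptype)
{
        skb->protocol = eth_type_trans(skb, rxq->netdev);

        if (!my_ptype_known(rxq, ptype))
                return 0;       /* skb still carries a valid protocol */

        my_rx_hash(rxq, skb);   /* offloads only for known ptypes */
        return 0;
}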
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 90316dc58630..6bc56c7c181e 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -298,6 +298,7 @@ struct igc_adapter {
/* LEDs */
struct mutex led_mutex;
+ struct igc_led_classdev *leds;
};
void igc_up(struct igc_adapter *adapter);
@@ -723,6 +724,7 @@ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
int igc_led_setup(struct igc_adapter *adapter);
+void igc_led_free(struct igc_adapter *adapter);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
index bf240c5daf86..3929b25b6ae6 100644
--- a/drivers/net/ethernet/intel/igc/igc_leds.c
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -236,8 +236,8 @@ static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
pci_dev_id(adapter->pdev), index);
}
-static void igc_setup_ldev(struct igc_led_classdev *ldev,
- struct net_device *netdev, int index)
+static int igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct led_classdev *led_cdev = &ldev->led;
@@ -257,24 +257,46 @@ static void igc_setup_ldev(struct igc_led_classdev *ldev,
led_cdev->hw_control_get = igc_led_hw_control_get;
led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
- devm_led_classdev_register(&netdev->dev, led_cdev);
+ return led_classdev_register(&netdev->dev, led_cdev);
}
int igc_led_setup(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct device *dev = &netdev->dev;
struct igc_led_classdev *leds;
- int i;
+ int i, err;
mutex_init(&adapter->led_mutex);
- leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
- for (i = 0; i < IGC_NUM_LEDS; i++)
- igc_setup_ldev(leds + i, netdev, i);
+ for (i = 0; i < IGC_NUM_LEDS; i++) {
+ err = igc_setup_ldev(leds + i, netdev, i);
+ if (err)
+ goto err;
+ }
+
+ adapter->leds = leds;
return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
+ return err;
+}
+
+void igc_led_free(struct igc_adapter *adapter)
+{
+ struct igc_led_classdev *leds = adapter->leds;
+ int i;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
}
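[editor's note] The igc change replaces devm-managed LED registration with plain led_classdev_register() plus an explicit igc_led_free(), so igc_remove() can drop the LEDs before the netdev goes away. The register-all-or-unwind shape, as a hedged sketch (led_classdev_register()/led_classdev_unregister() are the real APIs; the wrapper is illustrative):

	#include <linux/leds.h>

	/* Hedged sketch of the register-N-or-unwind pattern used above. */
	static int register_all_leds(struct device *parent,
				     struct led_classdev *leds, int count)
	{
		int i, err;

		for (i = 0; i < count; i++) {
			err = led_classdev_register(parent, &leds[i]);
			if (err)
				goto unwind;
		}
		return 0;

	unwind:
		/* Roll back only the LEDs that registered successfully. */
		while (--i >= 0)
			led_classdev_unregister(&leds[i]);
		return err;
	}

The unwind loop touches only the entries that registered, mirroring the `for (i--; i >= 0; i--)` loop in the patch.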
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2e1cfbd82f4f..4d975d620a8e 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1642,10 +1642,6 @@ done:
if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- /* FIXME: add support for retrieving timestamps from
- * the other timer registers before skipping the
- * timestamping request.
- */
unsigned long flags;
u32 tstamp_flags;
@@ -7025,6 +7021,9 @@ static void igc_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
+ if (IS_ENABLED(CONFIG_IGC_LEDS))
+ igc_led_free(adapter);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 13a6fca31004..866024f2b9ee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -914,7 +914,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
goto err_out;
}
- xs = kzalloc(sizeof(*xs), GFP_KERNEL);
+ algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
+ if (unlikely(!algo)) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ xs = kzalloc(sizeof(*xs), GFP_ATOMIC);
if (unlikely(!xs)) {
err = -ENOMEM;
goto err_out;
@@ -930,14 +936,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
xs->xso.dev = adapter->netdev;
- algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
- if (unlikely(!algo)) {
- err = -ENOENT;
- goto err_xs;
- }
-
aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
- xs->aead = kzalloc(aead_len, GFP_KERNEL);
+ xs->aead = kzalloc(aead_len, GFP_ATOMIC);
if (unlikely(!xs->aead)) {
err = -ENOMEM;
goto err_xs;
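[editor's note] The ixgbe hunk does two things: it performs the failable xfrm_aead_get_byname() lookup before anything is allocated, shrinking the unwind path, and it switches the allocations to GFP_ATOMIC because the caller may not be able to sleep. A hedged sketch of the lookup-then-allocate ordering (build_sa() and its arguments are illustrative):

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <net/xfrm.h>

	/* Hedged sketch: do the failable lookup first so the error path has
	 * nothing to unwind, and allocate with GFP_ATOMIC for atomic context.
	 */
	static struct xfrm_state *build_sa(const char *aead_name, int icv_bits)
	{
		struct xfrm_algo_desc *algo;
		struct xfrm_state *xs;

		algo = xfrm_aead_get_byname(aead_name, icv_bits, 1);
		if (!algo)
			return ERR_PTR(-ENOENT);

		xs = kzalloc(sizeof(*xs), GFP_ATOMIC);
		if (!xs)
			return ERR_PTR(-ENOMEM);

		/* ... fill xs->aead and friends from algo ... */
		return xs;
	}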
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 3c0f55b3e48e..b86f3224f0b7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -808,6 +808,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 72e060cf6b61..e9bf9231b018 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ if (iter >= MAX_LMAC_COUNT)
+ continue;
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d39001cdc707..00af8888e329 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4819,18 +4819,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+ }
- /* Set chan/link to backpressure TL3 instead of TL2 */
- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
- * This sticky mode is known to cause SQ stalls when multiple
- * SQs are mapped to same SMQ and transmitting pkts at a time.
- */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
- cfg &= ~BIT_ULL(15);
- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
- }
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to same SMQ and transmitting pkts at a time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index e350242bbafb..e8b73b9d75e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1657,7 +1657,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
struct npc_coalesced_kpu_prfl *img_data = NULL;
int i = 0, rc = -EINVAL;
void __iomem *kpu_prfl_addr;
- u16 offset;
+ u32 offset;
img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
@@ -2181,7 +2181,6 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
npc_mcam_rsrcs_deinit(rvu);
- kfree(mcam->counters.bmap);
if (rvu->kpu_prfl_addr)
iounmap(rvu->kpu_prfl_addr);
else
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index b40bd0e46751..3f46d5e0fb2e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1933,7 +1933,7 @@ int otx2_open(struct net_device *netdev)
* mcam entries are enabled to receive the packets. Hence disable the
* packet I/O.
*/
- if (err == EIO)
+ if (err == -EIO)
goto err_disable_rxtx;
else if (err)
goto err_tx_stop_queues;
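[editor's note] The otx2 one-liner fixes a classic sign bug: kernel APIs report failure as a negative errno, so `err == EIO` can never match. A minimal illustration (do_setup() is a stand-in):

	#include <linux/errno.h>

	static int do_setup(void);	/* stand-in; returns 0 or -Exxx */

	static int open_example(void)
	{
		int err = do_setup();

		if (err == -EIO)	/* correct: matches a failed call */
			return err;
		/* "err == EIO" would never be true: EIO is positive, while
		 * failures are returned negated.
		 */
		return err;
	}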
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 87bdb93cb066..f4655a8c0705 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -689,6 +689,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
+ u32 val;
flow_rule_match_control(rule, &match);
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -697,12 +698,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
if (ntohs(flow_spec->etype) == ETH_P_IP) {
- flow_spec->ip_flag = IPV4_FLAG_MORE;
+ flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
- flow_spec->next_header = IPPROTO_FRAGMENT;
+ flow_spec->next_header = val ?
+ IPPROTO_FRAGMENT : 0;
flow_mask->next_header = 0xff;
req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
} else {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1e77bbf5d22a..1723e9912ae0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -382,6 +382,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
otx2_qos_read_txschq_cfg_tl(node, cfg);
cnt = cfg->static_node_pos[node->level];
cfg->schq_contig_list[node->level][cnt] = node->schq;
+ cfg->schq_index_used[node->level][cnt] = true;
cfg->schq_contig[node->level]++;
cfg->static_node_pos[node->level]++;
otx2_qos_read_txschq_cfg_schq(node, cfg);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index c895e265ae0e..61334a71058c 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1074,13 +1074,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
+ mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);
wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
- wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
if (!mtk_wed_get_rx_capa(dev))
return;
@@ -1093,7 +1093,6 @@ static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
mtk_wed_stop(dev);
- mtk_wed_dma_disable(dev);
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
@@ -2605,9 +2604,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
- if (!dev->running)
- return;
-
mtk_wed_set_ext_int(dev, !!mask);
wed_w32(dev, MTK_WED_INT_MASK, mask);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 86f1854698b4..883c044852f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *
}
static inline u8
+mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo)
+{
+ return fifo->data[fifo->mask & fifo->cc];
+}
+
+static inline void
mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
{
- return fifo->data[fifo->mask & fifo->cc++];
+ fifo->cc++;
}
static inline void
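[editor's note] Splitting the metadata FIFO's destructive pop into peek (read without advancing) and pop (advance only) lets the TX path reserve an index while building the WQE and consume it only once the descriptor is committed, so an error path no longer has to push the entry back. A hedged sketch of the same shape:

	#include <linux/types.h>

	/* Hedged sketch: power-of-two ring with peek split from pop, so a
	 * slot is consumed only after the operation using it has committed.
	 */
	struct u8_fifo {
		u8  *data;
		u16  mask;	/* size - 1, size is a power of two */
		u16  pc;	/* producer counter */
		u16  cc;	/* consumer counter */
	};

	static inline u8 u8_fifo_peek(const struct u8_fifo *f)
	{
		return f->data[f->cc & f->mask];
	}

	static inline void u8_fifo_pop(struct u8_fifo *f)
	{
		f->cc++;	/* consume the entry returned by the last peek */
	}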
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index e87e26f2c669..6743806b8480 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -83,24 +83,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node_qid > priv->htb_max_qos_sqs);
- if (node_qid == priv->htb_max_qos_sqs) {
- struct mlx5e_sq_stats *stats, **stats_list = NULL;
-
- if (priv->htb_max_qos_sqs == 0) {
- stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
- sizeof(*stats_list),
- GFP_KERNEL);
- if (!stats_list)
- return -ENOMEM;
- }
+ WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
+ if (!priv->htb_qos_sq_stats) {
+ struct mlx5e_sq_stats **stats_list;
+
+ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+ sizeof(*stats_list), GFP_KERNEL);
+ if (!stats_list)
+ return -ENOMEM;
+
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ }
+
+ if (!priv->htb_qos_sq_stats[node_qid]) {
+ struct mlx5e_sq_stats *stats;
+
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
- if (!stats) {
- kvfree(stats_list);
+ if (!stats)
return -ENOMEM;
- }
- if (stats_list)
- WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+
WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
/* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
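[editor's note] The qos.c rework lazily allocates the per-SQ stats array and publishes it with WRITE_ONCE, relying on the release/acquire pairing the comment describes. A hedged sketch of that publication pattern, with MAX_NODES, struct foo_stats and foo_publish() as stand-ins:

	#include <linux/slab.h>

	#define MAX_NODES 256

	struct foo_stats { u64 packets; };

	static struct foo_stats **stats_arr;
	static unsigned int stats_count;

	/* Hedged sketch: lockless readers must never see the count advance
	 * before the array pointer and the new element are visible.
	 */
	static int foo_publish(struct foo_stats *s)
	{
		if (!stats_arr) {
			struct foo_stats **arr;

			arr = kvcalloc(MAX_NODES, sizeof(*arr), GFP_KERNEL);
			if (!arr)
				return -ENOMEM;
			WRITE_ONCE(stats_arr, arr);
		}
		WRITE_ONCE(stats_arr[stats_count], s);
		/* Pairs with smp_load_acquire(&stats_count) on the reader. */
		smp_store_release(&stats_count, stats_count + 1);
		return 0;
	}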
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 0ab9db319530..22918b2ef7f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -108,7 +108,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ rtnl_lock();
mlx5e_activate_txqsq(sq);
+ rtnl_unlock();
+
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
else
@@ -179,12 +182,16 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
+ rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
+ rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
+ rtnl_lock();
mlx5e_activate_priv_channels(priv);
+ rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index bcafb4bf9415..8d9a3b5ec973 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -179,6 +179,13 @@ u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
return min_t(u32, rqt_size, max_cap_rqt_size);
}
+#define MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH 256
+
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void)
+{
+ return MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH / MLX5E_UNIFORM_SPREAD_RQT_FACTOR;
+}
+
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
{
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index e0bc30308c77..2f9e04a8418f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -38,6 +38,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
}
u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void);
int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id);
int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
unsigned int num_rqns,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
index f675b1926340..f66bbc846464 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
{
+ mutex_lock(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
kvfree(selq->standby);
@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
kvfree(selq->standby);
selq->standby = NULL;
+ mutex_unlock(selq->state_lock);
}
void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index b2cabd6ab86c..cc9bcc420032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1640,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = {
.mdo_add_secy = mlx5e_macsec_add_secy,
.mdo_upd_secy = mlx5e_macsec_upd_secy,
.mdo_del_secy = mlx5e_macsec_del_secy,
+ .rx_uses_md_dst = true,
};
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index c7f542d0b8f0..93cf23278d93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -46,6 +46,10 @@ struct arfs_table {
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
+enum {
+ MLX5E_ARFS_STATE_ENABLED,
+};
+
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
@@ -60,6 +64,7 @@ struct mlx5e_arfs_tables {
spinlock_t arfs_lock;
int last_filter_id;
struct workqueue_struct *wq;
+ unsigned long state;
};
struct arfs_tuple {
@@ -170,6 +175,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
return err;
}
}
+ set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
return 0;
}
@@ -455,6 +462,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
int i;
int j;
+ clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
spin_lock_bh(&arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
@@ -627,17 +636,8 @@ static void arfs_handle_work(struct work_struct *work)
struct mlx5_flow_handle *rule;
arfs = mlx5e_fs_get_arfs(priv->fs);
- mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&arfs->arfs_lock);
- hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&arfs->arfs_lock);
-
- mutex_unlock(&priv->state_lock);
- kfree(arfs_rule);
- goto out;
- }
- mutex_unlock(&priv->state_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
+ return;
if (!arfs_rule->rule) {
rule = arfs_add_rule(priv, arfs_rule);
@@ -753,6 +753,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -EPERM;
+ }
+
arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
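[editor's note] The aRFS hunks replace a priv->state_lock round trip with a dedicated MLX5E_ARFS_STATE_ENABLED bit that both the datapath and the async worker check. The guard pattern, sketched with illustrative names:

	#include <linux/bitops.h>
	#include <linux/spinlock.h>

	/* Hedged sketch: gate the datapath and async work on a state bit
	 * checked under the table spinlock, instead of taking the heavier
	 * device state lock.
	 */
	enum { EXAMPLE_STATE_ENABLED };

	static unsigned long example_state;
	static DEFINE_SPINLOCK(example_lock);

	static int example_steer(void)
	{
		spin_lock_bh(&example_lock);
		if (!test_bit(EXAMPLE_STATE_ENABLED, &example_state)) {
			spin_unlock_bh(&example_lock);
			return -EPERM;
		}
		/* ... look up or queue the steering rule ... */
		spin_unlock_bh(&example_lock);
		return 0;
	}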
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cc51ce16df14..67a29826bb57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -451,6 +451,34 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
+ if (mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc == ETH_RSS_HASH_XOR) {
+ unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+ if (count > xor8_max_channels) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: Requested number of channels (%d) exceeds the maximum allowed by the XOR8 RSS hfunc (%d)\n",
+ __func__, count, xor8_max_channels);
+ goto out;
+ }
+ }
+
+ /* If RXFH is configured, changing the number of channels is allowed only if
+ * it does not require resizing the RSS table. This is because the previous
+ * configuration may no longer be compatible with the new RSS table.
+ */
+ if (netif_is_rxfh_configured(priv->netdev)) {
+ int cur_rqt_size = mlx5e_rqt_size(priv->mdev, cur_params->num_channels);
+ int new_rqt_size = mlx5e_rqt_size(priv->mdev, count);
+
+ if (new_rqt_size != cur_rqt_size) {
+ err = -EINVAL;
+ netdev_err(priv->netdev,
+ "%s: RXFH is configured, block changing channels number that affects RSS table size (new: %d, current: %d)\n",
+ __func__, new_rqt_size, cur_rqt_size);
+ goto out;
+ }
+ }
+
/* Don't allow changing the number of channels if HTB offload is active,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
@@ -561,12 +589,12 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
static void
mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int tc;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
+ struct mlx5_core_dev *mdev = c->mdev;
for (tc = 0; tc < c->num_tc; tc++) {
mlx5_core_modify_cq_moderation(mdev,
@@ -580,11 +608,11 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
static void
mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
+ struct mlx5_core_dev *mdev = c->mdev;
mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
coal->rx_coalesce_usecs,
@@ -1281,17 +1309,30 @@ int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
u8 hfunc = rxfh->hfunc;
+ unsigned int count;
int err;
mutex_lock(&priv->state_lock);
+
+ count = priv->channels.params.num_channels;
+
+ if (hfunc == ETH_RSS_HASH_XOR) {
+ unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+ if (count > xor8_max_channels) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
+ __func__, count, xor8_max_channels);
+ goto unlock;
+ }
+ }
+
if (*rss_context && rxfh->rss_delete) {
err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
goto unlock;
}
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- unsigned int count = priv->channels.params.num_channels;
-
err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
if (err)
goto unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 91848eae4565..319930c04093 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -209,8 +209,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
*data,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR_OR_NULL(priv->devcom))
- return -EOPNOTSUPP;
+ if (IS_ERR(priv->devcom))
+ return PTR_ERR(priv->devcom);
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -5726,9 +5726,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
- mutex_lock(&priv->state_lock);
mlx5e_selq_cleanup(&priv->selq);
- mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb_max_qos_sqs; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2fa076b23fbe..e21a3b4128ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -398,6 +398,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+ mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
+
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
@@ -496,9 +498,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
- if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
- mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
- be32_to_cpu(eseg->flow_table_metadata));
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
@@ -657,7 +656,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
eseg->flow_table_metadata =
- cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
+ cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 3047d7015c52..1789800faaeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1868,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
+ dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
@@ -1892,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- if (MLX5_ESWITCH_MANAGER(dev) &&
- mlx5_esw_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
- dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
@@ -1908,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
reps_err:
mlx5_esw_vports_cleanup(esw);
+ dev->priv.eswitch = NULL;
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
@@ -1926,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
- esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
@@ -1937,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup(esw);
+ esw->dev->priv.eswitch = NULL;
mlx5_esw_vports_cleanup(esw);
debugfs_remove_recursive(esw->debugfs_root);
devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index baaae628b0a0..844d3e3a65dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -43,6 +43,7 @@
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
+#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
@@ -2476,6 +2477,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
+ if (MLX5_ESWITCH_MANAGER(esw->dev) &&
+ mlx5_esw_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
err = devl_params_register(priv_to_devlink(esw->dev),
esw_devlink_params,
ARRAY_SIZE(esw_devlink_params));
@@ -3055,7 +3060,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
key,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (IS_ERR(esw->devcom))
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3707,6 +3712,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured.");
+ return -EPERM;
+ }
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e6bfa7e4f146..cf085a478e3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
+ struct mlx5_pkt_reformat *p2)
+{
+ return p1->owner == p2->owner &&
+ (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
+ p1->id == p2->id :
+ mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
+ mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+}
+
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2)
{
@@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
(d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
- (d1->vport.pkt_reformat->id ==
- d2->vport.pkt_reformat->id) : true)) ||
+ mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
+ d2->vport.pkt_reformat) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
+ /* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
- if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
+ if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d14459e5c04f..69d482f7c5a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return err;
}
- if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
mlx5_lag_port_sel_destroy(ldev);
+ ldev->buckets = 1;
+ }
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index e7d59cfa8708..7b0766c89f4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -220,7 +220,7 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp *comp;
if (IS_ERR_OR_NULL(devc))
- return NULL;
+ return ERR_PTR(-EINVAL);
mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, key, handler);
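[editor's note] Several hunks in this series converge on the same convention: mlx5_devcom_register_component() now fails with ERR_PTR() rather than NULL, and callers test IS_ERR() and propagate PTR_ERR(). A hedged sketch of the idiom (struct widget and widget_find() are stand-ins):

	#include <linux/err.h>

	struct widget;
	struct widget *widget_find(int id);

	static struct widget *widget_get(int id)
	{
		if (id < 0)
			return ERR_PTR(-EINVAL);	/* errno in the pointer */
		return widget_find(id);
	}

	static int widget_use(int id)
	{
		struct widget *w = widget_get(id);

		if (IS_ERR(w))
			return PTR_ERR(w);	/* decode and propagate */
		/* ... use w ... */
		return 0;
	}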
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index 5b28084e8a03..dd5d186dc614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -213,8 +213,8 @@ static int sd_register(struct mlx5_core_dev *dev)
sd = mlx5_get_sd(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
sd->group_id, NULL, dev);
- if (!devcom)
- return -ENOMEM;
+ if (IS_ERR(devcom))
+ return PTR_ERR(devcom);
sd->devcom = devcom;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c2593625c09a..331ce47f51a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -956,7 +956,7 @@ static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
mlx5_query_nic_system_image_guid(dev),
NULL, dev);
- if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ if (IS_ERR(dev->priv.hca_devcom_comp))
mlx5_core_err(dev, "Failed to register devcom HCA component\n");
}
@@ -1480,6 +1480,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
if (err)
goto err_register;
+ err = mlx5_crdump_enable(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);
+
+ err = mlx5_hwmon_dev_register(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
+
mutex_unlock(&dev->intf_state_mutex);
return 0;
@@ -1505,7 +1513,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
int err;
devl_lock(devlink);
+ devl_register(devlink);
err = mlx5_init_one_devl_locked(dev);
+ if (err)
+ devl_unregister(devlink);
devl_unlock(devlink);
return err;
}
@@ -1517,6 +1528,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
+ mlx5_hwmon_dev_unregister(dev);
+ mlx5_crdump_disable(dev);
mlx5_unregister_device(dev);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1534,6 +1547,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unregister(devlink);
devl_unlock(devlink);
}
@@ -1680,16 +1694,23 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
}
devl_lock(devlink);
+ devl_register(devlink);
+
err = mlx5_devlink_params_register(priv_to_devlink(dev));
- devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
- goto query_hca_caps_err;
+ goto params_reg_err;
}
+ devl_unlock(devlink);
return 0;
+params_reg_err:
+ devl_unregister(devlink);
+ devl_unlock(devlink);
query_hca_caps_err:
+ devl_unregister(devlink);
+ devl_unlock(devlink);
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
@@ -1702,6 +1723,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
devl_lock(devlink);
mlx5_devlink_params_unregister(priv_to_devlink(dev));
+ devl_unregister(devlink);
devl_unlock(devlink);
if (dev->state != MLX5_DEVICE_STATE_UP)
return;
@@ -1943,16 +1965,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_one;
}
- err = mlx5_crdump_enable(dev);
- if (err)
- dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
-
- err = mlx5_hwmon_dev_register(dev);
- if (err)
- mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
-
pci_save_state(pdev);
- devlink_register(devlink);
return 0;
err_init_one:
@@ -1973,16 +1986,9 @@ static void remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
- /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
- * devlink notify APIs.
- * Hence, we must drain them before unregistering the devlink.
- */
mlx5_drain_fw_reset(dev);
mlx5_drain_health_wq(dev);
- devlink_unregister(devlink);
mlx5_sriov_disable(pdev, false);
- mlx5_hwmon_dev_unregister(dev);
- mlx5_crdump_disable(dev);
mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 4dcf995cb1a2..6bac8ad70ba6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -19,6 +19,7 @@
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
+#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
@@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
return;
}
+ vecidx -= MLX5_IRQ_VEC_COMP_BASE;
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}
@@ -585,7 +587,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
- int offset = 1;
+ int offset = MLX5_IRQ_VEC_COMP_BASE;
if (!pool->xa_num_irqs.max)
offset = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index bc863e1f062e..7ebe71280827 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -75,7 +75,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto peer_devlink_set_err;
}
- devlink_register(devlink);
return 0;
peer_devlink_set_err:
@@ -101,7 +100,6 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
devlink = priv_to_devlink(mdev);
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
mlx5_drain_health_wq(mdev);
- devlink_unregister(devlink);
if (mlx5_dev_is_lightweight(mdev))
mlx5_uninit_one_light(mdev);
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 64f4cc284aea..030a5776c937 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -205,12 +205,11 @@ dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size)
}
static int
-dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
+dr_dump_rule_action_mem(struct seq_file *file, char *buff, const u64 rule_id,
struct mlx5dr_rule_action_member *action_mem)
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
int ret;
@@ -488,10 +487,9 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
}
static int
-dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
+dr_dump_rule_mem(struct seq_file *file, char *buff, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
int ret;
@@ -522,7 +520,8 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
}
static int
-dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
+dr_dump_rule_rx_tx(struct seq_file *file, char *buff,
+ struct mlx5dr_rule_rx_tx *rule_rx_tx,
bool is_rx, const u64 rule_id, u8 format_ver)
{
struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
@@ -533,7 +532,7 @@ dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
return 0;
while (i--) {
- ret = dr_dump_rule_mem(file, ste_arr[i], is_rx, rule_id,
+ ret = dr_dump_rule_mem(file, buff, ste_arr[i], is_rx, rule_id,
format_ver);
if (ret < 0)
return ret;
@@ -542,7 +541,8 @@ dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
return 0;
}
-static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
+static noinline_for_stack int
+dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
@@ -565,19 +565,19 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
return ret;
if (rx->nic_matcher) {
- ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
+ ret = dr_dump_rule_rx_tx(file, buff, rx, true, rule_id, format_ver);
if (ret < 0)
return ret;
}
if (tx->nic_matcher) {
- ret = dr_dump_rule_rx_tx(file, tx, false, rule_id, format_ver);
+ ret = dr_dump_rule_rx_tx(file, buff, tx, false, rule_id, format_ver);
if (ret < 0)
return ret;
}
list_for_each_entry(action_mem, &rule->rule_actions_list, list) {
- ret = dr_dump_rule_action_mem(file, rule_id, action_mem);
+ ret = dr_dump_rule_action_mem(file, buff, rule_id, action_mem);
if (ret < 0)
return ret;
}
@@ -586,10 +586,10 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
}
static int
-dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
+dr_dump_matcher_mask(struct seq_file *file, char *buff,
+ struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char dump[DR_HEX_SIZE];
int ret;
@@ -681,10 +681,10 @@ dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
}
static int
-dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
+dr_dump_matcher_builder(struct seq_file *file, char *buff,
+ struct mlx5dr_ste_build *builder,
u32 index, bool is_rx, const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -702,11 +702,10 @@ dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
}
static int
-dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
+dr_dump_matcher_rx_tx(struct seq_file *file, char *buff, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
@@ -731,7 +730,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
- ret = dr_dump_matcher_builder(file,
+ ret = dr_dump_matcher_builder(file, buff,
&matcher_rx_tx->ste_builder[i],
i, is_rx, matcher_id);
if (ret < 0)
@@ -741,7 +740,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
@@ -763,19 +762,19 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
if (ret)
return ret;
- ret = dr_dump_matcher_mask(file, &matcher->mask,
+ ret = dr_dump_matcher_mask(file, buff, &matcher->mask,
matcher->match_criteria, matcher_id);
if (ret < 0)
return ret;
if (rx->nic_tbl) {
- ret = dr_dump_matcher_rx_tx(file, true, rx, matcher_id);
+ ret = dr_dump_matcher_rx_tx(file, buff, true, rx, matcher_id);
if (ret < 0)
return ret;
}
if (tx->nic_tbl) {
- ret = dr_dump_matcher_rx_tx(file, false, tx, matcher_id);
+ ret = dr_dump_matcher_rx_tx(file, buff, false, tx, matcher_id);
if (ret < 0)
return ret;
}
@@ -803,11 +802,10 @@ dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher)
}
static int
-dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
+dr_dump_table_rx_tx(struct seq_file *file, char *buff, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
int ret;
@@ -829,7 +827,8 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
return 0;
}
-static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
+static noinline_for_stack int
+dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
@@ -848,14 +847,14 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
return ret;
if (rx->nic_dmn) {
- ret = dr_dump_table_rx_tx(file, true, rx,
+ ret = dr_dump_table_rx_tx(file, buff, true, rx,
DR_DBG_PTR_TO_ID(table));
if (ret < 0)
return ret;
}
if (tx->nic_dmn) {
- ret = dr_dump_table_rx_tx(file, false, tx,
+ ret = dr_dump_table_rx_tx(file, buff, false, tx,
DR_DBG_PTR_TO_ID(table));
if (ret < 0)
return ret;
@@ -881,10 +880,10 @@ static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl)
}
static int
-dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
+dr_dump_send_ring(struct seq_file *file, char *buff,
+ struct mlx5dr_send_ring *ring,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -902,13 +901,13 @@ dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
return 0;
}
-static noinline_for_stack int
+static int
dr_dump_domain_info_flex_parser(struct seq_file *file,
+ char *buff,
const char *flex_parser_name,
const u8 flex_parser_value,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -925,11 +924,11 @@ dr_dump_domain_info_flex_parser(struct seq_file *file,
return 0;
}
-static noinline_for_stack int
-dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
+static int
+dr_dump_domain_info_caps(struct seq_file *file, char *buff,
+ struct mlx5dr_cmd_caps *caps,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_cmd_vport_cap *vport_caps;
unsigned long i, vports_num;
int ret;
@@ -969,34 +968,35 @@ dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
}
static int
-dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
+dr_dump_domain_info(struct seq_file *file, char *buff,
+ struct mlx5dr_domain_info *info,
const u64 domain_id)
{
int ret;
- ret = dr_dump_domain_info_caps(file, &info->caps, domain_id);
+ ret = dr_dump_domain_info_caps(file, buff, &info->caps, domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmp_dw0",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw0",
info->caps.flex_parser_id_icmp_dw0,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmp_dw1",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw1",
info->caps.flex_parser_id_icmp_dw1,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw0",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw0",
info->caps.flex_parser_id_icmpv6_dw0,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw1",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw1",
info->caps.flex_parser_id_icmpv6_dw1,
domain_id);
if (ret < 0)
@@ -1032,12 +1032,12 @@ dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
if (ret)
return ret;
- ret = dr_dump_domain_info(file, &dmn->info, domain_id);
+ ret = dr_dump_domain_info(file, buff, &dmn->info, domain_id);
if (ret < 0)
return ret;
if (dmn->info.supp_sw_steering) {
- ret = dr_dump_send_ring(file, dmn->send_ring, domain_id);
+ ret = dr_dump_send_ring(file, buff, dmn->send_ring, domain_id);
if (ret < 0)
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 3d09fa54598f..ba303868686a 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include "mlxbf_gige.h"
@@ -139,13 +140,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
control |= MLXBF_GIGE_CONTROL_PORT_EN;
writeq(control, priv->base + MLXBF_GIGE_CONTROL);
- err = mlxbf_gige_request_irqs(priv);
- if (err)
- return err;
mlxbf_gige_cache_stats(priv);
err = mlxbf_gige_clean_port(priv);
if (err)
- goto free_irqs;
+ return err;
/* Clear driver's valid_polarity to match hardware,
* since the above call to clean_port() resets the
@@ -157,7 +155,7 @@ static int mlxbf_gige_open(struct net_device *netdev)
err = mlxbf_gige_tx_init(priv);
if (err)
- goto free_irqs;
+ goto phy_deinit;
err = mlxbf_gige_rx_init(priv);
if (err)
goto tx_deinit;
@@ -166,6 +164,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
napi_enable(&priv->napi);
netif_start_queue(netdev);
+ err = mlxbf_gige_request_irqs(priv);
+ if (err)
+ goto napi_deinit;
+
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
@@ -182,11 +184,17 @@ static int mlxbf_gige_open(struct net_device *netdev)
return 0;
+napi_deinit:
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
+ mlxbf_gige_rx_deinit(priv);
+
tx_deinit:
mlxbf_gige_tx_deinit(priv);
-free_irqs:
- mlxbf_gige_free_irqs(priv);
+phy_deinit:
+ phy_stop(phydev);
return err;
}
@@ -485,8 +493,13 @@ static void mlxbf_gige_shutdown(struct platform_device *pdev)
{
struct mlxbf_gige *priv = platform_get_drvdata(pdev);
- writeq(0, priv->base + MLXBF_GIGE_INT_EN);
- mlxbf_gige_clean_port(priv);
+ rtnl_lock();
+ netif_device_detach(priv->netdev);
+
+ if (netif_running(priv->netdev))
+ dev_close(priv->netdev);
+
+ rtnl_unlock();
}
static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
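[editor's note] The mlxbf_gige shutdown hook now detaches the netdev and reuses the driver's regular close path under RTNL, instead of hand-rolling a partial register-level quiesce. The shape of that pattern, hedged:

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	/* Hedged sketch: quiesce a netdev in .shutdown by reusing the
	 * normal, fully ordered close path under the RTNL lock.
	 */
	static void example_shutdown(struct net_device *netdev)
	{
		rtnl_lock();
		netif_device_detach(netdev);	/* mark the device as gone */

		if (netif_running(netdev))
			dev_close(netdev);	/* full teardown, IRQs included */

		rtnl_unlock();
	}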
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e4d7739bd7c8..4a79c0d7e7ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -849,7 +849,7 @@ free_skb:
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
- EMAD, DISCARD);
+ EMAD, FORWARD);
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 53b150b7ae4e..6c06b0592760 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -1357,24 +1357,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
.got_inactive = mlxsw_env_got_inactive,
};
-static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
{
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool mcia_128b_supported;
+ bool mcia_128b_supported = false;
int err;
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
- &mcia_128b_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+ &mcia_128b_supported);
mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
-
- return 0;
}
int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
@@ -1445,15 +1441,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_type_set;
- err = mlxsw_env_max_module_eeprom_len_query(env);
- if (err)
- goto err_eeprom_len_query;
-
+ mlxsw_env_max_module_eeprom_len_query(env);
env->line_cards[0]->active = true;
return 0;
-err_eeprom_len_query:
err_type_set:
mlxsw_env_module_event_disable(env, 0);
err_mlxsw_env_module_event_enable:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index af99bf17eb36..f42a1b1c9368 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1530,7 +1530,7 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool pci_reset_supported;
+ bool pci_reset_supported = false;
u32 sys_status;
int err;
@@ -1548,11 +1548,9 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
- &pci_reset_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+ &pci_reset_supported);
if (pci_reset_supported) {
pci_dbg(pdev, "Starting PCI reset flow\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index f20052776b3f..92a406f02eae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/idr.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -58,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
- if (id < tcam->max_regions) {
- __set_bit(id, tcam->used_regions);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_regions);
+ ida_free(&tcam->used_regions, id);
}
static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
- if (id < tcam->max_groups) {
- __set_bit(id, tcam->used_groups);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_groups);
+ ida_free(&tcam->used_groups, id);
}
struct mlxsw_sp_acl_tcam_pattern {
@@ -715,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+ mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+ mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash gone out of credits so it was interrupted.
* Schedule the work as soon as possible to continue.
@@ -726,6 +731,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
}
static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ /* The entry markers are relative to the current chunk and therefore
+ * need to be reset together with the chunk marker.
+ */
+ ctx->current_vchunk = NULL;
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = NULL;
+}
+
+static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
@@ -747,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
- vregion->rehash.ctx.current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@@ -820,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
- cancel_delayed_work_sync(&vregion->rehash.dw);
+ if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+ ctx->hints_priv)
+ ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@@ -1154,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
- ventry->entry, activity);
+ struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+ int err;
+
+ mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+ activity);
+ mutex_unlock(&vregion->lock);
+ return err;
}
static int
@@ -1189,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+ WARN_ON(vchunk->chunk2);
+
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@@ -1207,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@@ -1230,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+ if (list_empty(&vchunk->ventry_list))
+ goto out;
+
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@@ -1239,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
+ WARN_ON(ventry->vchunk != vchunk);
+
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@@ -1279,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
+out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@@ -1292,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
+ if (list_empty(&vregion->vchunk_list))
+ return 0;
+
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@@ -1320,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
- mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
+ if (ctx->this_is_rollback)
+ return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@@ -1340,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
/* Let the rollback to be continued later on. */
}
}
- mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@@ -1389,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@@ -1441,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ return;
}
if (*credits >= 0)
@@ -1549,19 +1588,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
if (max_tcam_regions < max_regions)
max_regions = max_tcam_regions;
- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
- if (!tcam->used_regions) {
- err = -ENOMEM;
- goto err_alloc_used_regions;
- }
+ ida_init(&tcam->used_regions);
tcam->max_regions = max_regions;
max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
- if (!tcam->used_groups) {
- err = -ENOMEM;
- goto err_alloc_used_groups;
- }
+ ida_init(&tcam->used_groups);
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
@@ -1575,10 +1606,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_tcam_init:
- bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
- bitmap_free(tcam->used_regions);
-err_alloc_used_regions:
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
err_rehash_params_register:
mutex_destroy(&tcam->lock);
@@ -1591,8 +1620,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->fini(mlxsw_sp, tcam->priv);
- bitmap_free(tcam->used_groups);
- bitmap_free(tcam->used_regions);
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
mutex_destroy(&tcam->lock);
}
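The conversion above replaces fixed-size bitmaps with IDAs for region and group ID accounting. A minimal sketch of the allocation pattern this enables; all names here are illustrative, not taken from the driver:

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct my_tcam {
	struct ida used_regions;	/* replaces the old bit array */
	unsigned int max_regions;
};

static int my_region_id_get(struct my_tcam *tcam, u16 *p_id)
{
	/* ida_alloc_max() returns the lowest free ID in [0, max], inclusive */
	int id = ida_alloc_max(&tcam->used_regions,
			       tcam->max_regions - 1, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */
	*p_id = id;
	return 0;
}

static void my_region_id_put(struct my_tcam *tcam, u16 id)
{
	ida_free(&tcam->used_regions, id);
}

Unlike the bitmap version, no up-front allocation sized to the maximum is needed, so the error unwinding in init() collapses to ida_destroy().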
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 462bf448497d..79a1d8606512 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -6,15 +6,16 @@
#include <linux/list.h>
#include <linux/parman.h>
+#include <linux/idr.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
+ struct ida used_regions;
unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
+ struct ida used_groups;
unsigned int max_groups;
unsigned int max_group_size;
struct mutex lock; /* guards vregion list */
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e5ec0a363aff..31f75b4a67fd 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -368,7 +368,6 @@ union ks8851_tx_hdr {
* @rdfifo: FIFO read callback
* @wrfifo: FIFO write callback
* @start_xmit: start_xmit() implementation callback
- * @rx_skb: rx_skb() implementation callback
* @flush_tx_work: flush_tx_work() implementation callback
*
* The @statelock is used to protect information in the structure which may
@@ -423,8 +422,6 @@ struct ks8851_net {
struct sk_buff *txp, bool irq);
netdev_tx_t (*start_xmit)(struct sk_buff *skb,
struct net_device *dev);
- void (*rx_skb)(struct ks8851_net *ks,
- struct sk_buff *skb);
void (*flush_tx_work)(struct ks8851_net *ks);
};
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 0bf13b38b8f5..d4cdf3d4f552 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -232,16 +232,6 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
}
/**
- * ks8851_rx_skb - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
-{
- ks->rx_skb(ks, skb);
-}
-
-/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
*
@@ -309,7 +299,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- ks8851_rx_skb(ks, skb);
+ __netif_rx(skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -340,6 +330,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
unsigned long flags;
unsigned int status;
+ local_bh_disable();
+
ks8851_lock(ks, &flags);
status = ks8851_rdreg16(ks, KS_ISR);
@@ -416,6 +408,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
+ local_bh_enable();
+
return IRQ_HANDLED;
}
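With the per-bus rx_skb callback gone, both the SPI and parallel paths feed packets through __netif_rx(), which is only legal with bottom halves disabled; hence the local_bh_disable()/local_bh_enable() bracket added to the threaded handler above. A hedged sketch of that pattern, where my_build_rx_skb() is a hypothetical stand-in for the FIFO read:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct sk_buff *my_build_rx_skb(struct net_device *ndev); /* hypothetical */

static irqreturn_t my_irq_thread(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct sk_buff *skb;

	local_bh_disable();		/* __netif_rx() requires BHs off */

	while ((skb = my_build_rx_skb(ndev)) != NULL) {
		skb->protocol = eth_type_trans(skb, ndev);
		__netif_rx(skb);	/* queue to this CPU's backlog */
	}

	local_bh_enable();		/* runs the raised NET_RX softirq */
	return IRQ_HANDLED;
}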
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2a7f29854267..381b9cd285eb 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -210,16 +210,6 @@ static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
}
-/**
- * ks8851_rx_skb_par - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
{
return ks8851_rdreg16_par(ks, KS_TXQCR);
@@ -298,7 +288,6 @@ static int ks8851_probe_par(struct platform_device *pdev)
ks->rdfifo = ks8851_rdfifo_par;
ks->wrfifo = ks8851_wrfifo_par;
ks->start_xmit = ks8851_start_xmit_par;
- ks->rx_skb = ks8851_rx_skb_par;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
IRQ_RXI | /* RX done */ \
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 2f803377c9f9..670c1de966db 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -299,16 +299,6 @@ static unsigned int calc_txlen(unsigned int len)
}
/**
- * ks8851_rx_skb_spi - receive skbuff
- * @ks: The device state
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
-/**
* ks8851_tx_work - process tx packet(s)
 * @work: The work structure that was scheduled.
*
@@ -435,7 +425,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
ks->rdfifo = ks8851_rdfifo_spi;
ks->wrfifo = ks8851_wrfifo_spi;
ks->start_xmit = ks8851_start_xmit_spi;
- ks->rx_skb = ks8851_rx_skb_spi;
ks->flush_tx_work = ks8851_flush_tx_work_spi;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index bd8aa83b47e5..75a988c0bd79 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -25,6 +25,8 @@
#define PCS_POWER_STATE_DOWN 0x6
#define PCS_POWER_STATE_UP 0x4
+#define RFE_RD_FIFO_TH_3_DWORDS 0x3
+
static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
{
u32 chip_rev;
@@ -3272,6 +3274,21 @@ static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
lan743x_pci_cleanup(adapter);
}
+static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter)
+{
+ u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;
+
+ if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) {
+ u32 misc_ctl;
+
+ misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0);
+ misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_;
+ misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_,
+ RFE_RD_FIFO_TH_3_DWORDS);
+ lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl);
+ }
+}
+
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
@@ -3287,6 +3304,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
pci11x1x_strap_get_status(adapter);
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
+ pci11x1x_set_rfe_rd_fifo_threshold(adapter);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
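pci11x1x_set_rfe_rd_fifo_threshold() above is a standard masked read-modify-write built from GENMASK() and FIELD_PREP(). A small self-contained sketch of the idiom; the register and field names here are made up:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_CTL_FIFO_TH_MASK	GENMASK(6, 4)	/* illustrative 3-bit field */

static u32 my_set_fifo_threshold(u32 reg, u32 dwords)
{
	reg &= ~MY_CTL_FIFO_TH_MASK;			/* clear the old field */
	reg |= FIELD_PREP(MY_CTL_FIFO_TH_MASK, dwords);	/* shift new value in */
	return reg;
}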
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index be79cb0ae5af..645bc048e52e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -26,6 +26,7 @@
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_REV_A0_ (0x00000000)
#define ID_REV_CHIP_REV_B0_ (0x00000010)
+#define ID_REV_CHIP_REV_PCI11X1X_B0_ (0x000000B0)
#define FPGA_REV (0x04)
#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
@@ -311,6 +312,9 @@
#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+#define MISC_CTL_0 (0x920)
+#define MISC_CTL_0_RFE_READ_FIFO_MASK_ GENMASK(6, 4)
+
/* Vendor Specific SGMII MMD details */
#define SR_VSMMD_PCS_ID1 0x0004
#define SR_VSMMD_PCS_ID2 0x0005
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 3a1b1a1f5a19..60dd2fd603a8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -731,7 +731,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
bool sgmii = false, inband_aneg = false;
int err;
- if (port->conf.inband) {
+ if (conf->inband) {
if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
conf->portmode == PHY_INTERFACE_MODE_QSGMII)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
@@ -948,7 +948,7 @@ int sparx5_port_pcs_set(struct sparx5 *sparx5,
if (err)
return -EINVAL;
- if (port->conf.inband) {
+ if (conf->inband) {
/* Enable/disable 1G counters in ASM */
spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
ASM_PORT_CFG_CSC_STAT_DIS,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 523e0c470894..55f255a3c9db 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
u16 l3_proto; /* protocol specified in the template */
};
+/* SparX-5 VCAP fragment types:
+ * 0 = no fragment, 1 = initial fragment,
+ * 2 = suspicious fragment, 3 = valid follow-up fragment
+ */
+enum { /* key / mask */
+ FRAG_NOT = 0x03, /* 0 / 3 */
+ FRAG_SOME = 0x11, /* 1 / 1 */
+ FRAG_FIRST = 0x13, /* 1 / 3 */
+ FRAG_LATER = 0x33, /* 3 / 3 */
+ FRAG_INVAL = 0xff, /* invalid */
+};
+
+/* Flower fragment flag to VCAP fragment type mapping */
+static const u8 sparx5_vcap_frag_map[4][4] = { /* is_frag */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
+ { FRAG_NOT, FRAG_NOT, FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
+ { FRAG_SOME, FRAG_LATER, FRAG_INVAL, FRAG_FIRST } /* 1/1 */
+ /* 0/0 0/1 1/0 1/1 <-- first_frag */
+};
+
static int
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
@@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
flow_rule_match_control(st->frule, &mt);
if (mt.mask->flags) {
- if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
- if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
- value = 1; /* initial fragment */
- mask = 0x3;
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
+ u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
+
+ u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
+
+ /* Lookup verdict based on the 2 + 2 input bits */
+ u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
+
+ if (vdt == FRAG_INVAL) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "Match on invalid fragment flag combination");
+ return -EINVAL;
}
+ /* Extract VCAP fragment key and mask from verdict */
+ value = (vdt >> 4) & 0x3;
+ mask = vdt & 0x3;
+
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_FRAGMENT_TYPE,
value, mask);
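The rewrite above collapses the nested fragment-flag checks into a 4x4 lookup table indexed by the (key, mask) bit pairs of FLOW_DIS_IS_FRAGMENT and FLOW_DIS_FIRST_FRAG. Each verdict byte packs the VCAP key in its high nibble and the mask in its low nibble, so decoding is two shifts — a sketch:

#include <linux/types.h>

/* verdict byte: VCAP key in bits 7:4, VCAP mask in bits 3:0 */
static void my_frag_verdict_decode(u8 vdt, u32 *value, u32 *mask)
{
	*value = (vdt >> 4) & 0x3;	/* e.g. FRAG_FIRST (0x13) -> key 1 */
	*mask = vdt & 0x3;		/*                        -> mask 3 */
}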
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 59287c6e6cee..d8af5e7e15b4 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -601,7 +601,7 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
- *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+ *datasize = mtu + ETH_HLEN;
}
static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 4c043052198d..00882ffc7a02 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -73,6 +73,7 @@ enum mac_version {
};
struct rtl8169_private;
+struct r8169_led_classdev;
void r8169_apply_firmware(struct rtl8169_private *tp);
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
@@ -84,7 +85,8 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
char *buf, int buf_len);
int rtl8168_get_led_mode(struct rtl8169_private *tp);
int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
-void rtl8168_init_leds(struct net_device *ndev);
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev);
int rtl8125_get_led_mode(struct rtl8169_private *tp, int index);
int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode);
-void rtl8125_init_leds(struct net_device *ndev);
+struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev);
+void r8169_remove_leds(struct r8169_led_classdev *leds);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
index 7c5dc9d0df85..e10bee706bc6 100644
--- a/drivers/net/ethernet/realtek/r8169_leds.c
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -146,22 +146,22 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
/* ignore errors */
- devm_led_classdev_register(&ndev->dev, led_cdev);
+ led_classdev_register(&ndev->dev, led_cdev);
}
-void rtl8168_init_leds(struct net_device *ndev)
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev)
{
- /* bind resource mgmt to netdev */
- struct device *dev = &ndev->dev;
struct r8169_led_classdev *leds;
int i;
- leds = devm_kcalloc(dev, RTL8168_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(RTL8168_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
if (!leds)
- return;
+ return NULL;
for (i = 0; i < RTL8168_NUM_LEDS; i++)
rtl8168_setup_ldev(leds + i, ndev, i);
+
+ return leds;
}
static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev,
@@ -245,20 +245,31 @@ static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev,
led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
/* ignore errors */
- devm_led_classdev_register(&ndev->dev, led_cdev);
+ led_classdev_register(&ndev->dev, led_cdev);
}
-void rtl8125_init_leds(struct net_device *ndev)
+struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev)
{
- /* bind resource mgmt to netdev */
- struct device *dev = &ndev->dev;
struct r8169_led_classdev *leds;
int i;
- leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(RTL8125_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
if (!leds)
- return;
+ return NULL;
for (i = 0; i < RTL8125_NUM_LEDS; i++)
rtl8125_setup_led_ldev(leds + i, ndev, i);
+
+ return leds;
+}
+
+void r8169_remove_leds(struct r8169_led_classdev *leds)
+{
+ if (!leds)
+ return;
+
+ for (struct r8169_led_classdev *l = leds; l->ndev; l++)
+ led_classdev_unregister(&l->led);
+
+ kfree(leds);
}
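rtl8168_init_leds()/rtl8125_init_leds() now allocate one spare zeroed element (NUM_LEDS + 1) so r8169_remove_leds() can walk the array until it hits an entry with a NULL ndev, with no stored length. The sentinel idiom in isolation; struct and member names are illustrative:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct my_led {
	struct net_device *ndev;	/* NULL in the sentinel slot */
	/* ... per-LED state ... */
};

static void my_remove_leds(struct my_led *leds)
{
	if (!leds)			/* allocation may have failed */
		return;

	for (struct my_led *l = leds; l->ndev; l++) {
		/* unregister the classdev owned by *l here */
	}

	kfree(leds);
}

kcalloc() zeroes the whole allocation, so the extra slot is a valid terminator without further setup.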
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5c879a5c86d7..0fc5fe564ae5 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -647,6 +647,8 @@ struct rtl8169_private {
const char *fw_name;
struct rtl_fw *rtl_fw;
+ struct r8169_led_classdev *leds;
+
u32 ocp_base;
};
@@ -1314,17 +1316,40 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
+static void rtl_dash_loop_wait(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long usecs, int n, bool high)
+{
+ if (!tp->dash_enabled)
+ return;
+ rtl_loop_wait(tp, c, usecs, n, high);
+}
+
+static void rtl_dash_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
+{
+ rtl_dash_loop_wait(tp, c, d, n, true);
+}
+
+static void rtl_dash_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
+{
+ rtl_dash_loop_wait(tp, c, d, n, false);
+}
+
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+ rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1338,7 +1363,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
@@ -1346,7 +1371,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl8168ep_stop_cmac(tp);
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -5021,6 +5046,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->wk.work);
+ if (IS_ENABLED(CONFIG_R8169_LEDS))
+ r8169_remove_leds(tp->leds);
+
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)
@@ -5141,6 +5169,15 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
struct mii_bus *new_bus;
int ret;
+	/* On some boards with this chip version the BIOS is buggy and fails
+	 * to reset the PHY page selector. This results in the PHY ID read
+ * accessing registers on a different page, returning a more or
+ * less random value. Fix this by resetting the page selector first.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_25 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_26)
+ r8169_mdio_write(tp, 0x1f, 0);
+
new_bus = devm_mdiobus_alloc(&pdev->dev);
if (!new_bus)
return -ENOMEM;
@@ -5469,9 +5506,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ENABLED(CONFIG_R8169_LEDS)) {
if (rtl_is_8125(tp))
- rtl8125_init_leds(dev);
+ tp->leds = rtl8125_init_leds(dev);
else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
- rtl8168_init_leds(dev);
+ tp->leds = rtl8168_init_leds(dev);
}
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index d1be030c8848..9b1f639f64a1 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -769,25 +769,28 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
- u16 pkt_len;
+ u16 desc_len;
u8 die_dt;
int entry;
int limit;
int i;
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- desc = &priv->rx_ring[q].desc[entry];
- for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
+
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
- pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+ desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
/* We use 0-byte descriptors to mark the DMA mapping errors */
- if (!pkt_len)
+ if (!desc_len)
continue;
if (desc_status & MSC_MC)
@@ -808,25 +811,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
switch (die_dt) {
case DT_FSINGLE:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(skb, pkt_len);
+ skb_put(skb, desc_len);
skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
- stats->rx_bytes += pkt_len;
+ stats->rx_bytes += desc_len;
break;
case DT_FSTART:
priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(priv->rx_1st_skb, pkt_len);
+ skb_put(priv->rx_1st_skb, desc_len);
break;
case DT_FMID:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
+ desc_len);
+ skb_put(priv->rx_1st_skb, desc_len);
dev_kfree_skb(skb);
break;
case DT_FEND:
@@ -834,23 +837,20 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
+ desc_len);
+ skb_put(priv->rx_1st_skb, desc_len);
dev_kfree_skb(skb);
priv->rx_1st_skb->protocol =
eth_type_trans(priv->rx_1st_skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(skb);
+ ravb_rx_csum_gbeth(priv->rx_1st_skb);
+ stats->rx_bytes += priv->rx_1st_skb->len;
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
rx_packets++;
- stats->rx_bytes += pkt_len;
break;
}
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].desc[entry];
}
/* Refill the RX ring buffers. */
@@ -891,30 +891,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
- priv->cur_rx[q];
struct net_device_stats *stats = &priv->stats[q];
struct ravb_ex_rx_desc *desc;
+ unsigned int limit, i;
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
+ int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
- int limit;
+ int entry;
+
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
- desc = &priv->rx_ring[q].ex_desc[entry];
- while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -960,12 +959,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum(skb);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].ex_desc[entry];
}
/* Refill the RX ring buffers. */
@@ -995,9 +991,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
@@ -1324,12 +1320,12 @@ static int ravb_poll(struct napi_struct *napi, int budget)
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
+ bool unmask;
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (ravb_rx(ndev, &quota, q))
- goto out;
+ unmask = !ravb_rx(ndev, &quota, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1339,6 +1335,18 @@ static int ravb_poll(struct napi_struct *napi, int budget)
netif_wake_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
+ /* Receive error message handling */
+ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+ if (info->nc_queues)
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
+ ndev->stats.rx_over_errors = priv->rx_over_errors;
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
+ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+
+ if (!unmask)
+ goto out;
+
napi_complete(napi);
/* Re-enable RX/TX interrupts */
@@ -1352,14 +1360,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
}
spin_unlock_irqrestore(&priv->lock, flags);
- /* Receive error message handling */
- priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
- if (info->nc_queues)
- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
- if (priv->rx_over_errors != ndev->stats.rx_over_errors)
- ndev->stats.rx_over_errors = priv->rx_over_errors;
- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
- ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
return budget - quota;
}
@@ -2722,19 +2722,18 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
struct platform_device *pdev = priv->pdev;
struct net_device *ndev = priv->ndev;
struct device *dev = &pdev->dev;
- const char *dev_name;
+ const char *devname = dev_name(dev);
unsigned long flags;
int error, irq_num;
if (irq_name) {
- dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!dev_name)
+ devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
+ if (!devname)
return -ENOMEM;
irq_num = platform_get_irq_byname(pdev, irq_name);
flags = 0;
} else {
- dev_name = ndev->name;
irq_num = platform_get_irq(pdev, 0);
flags = IRQF_SHARED;
}
@@ -2744,9 +2743,9 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
if (irq)
*irq = irq_num;
- error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+ error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+ netdev_err(ndev, "cannot request IRQ %s\n", devname);
return error;
}
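The ravb_poll() rework above follows the standard NAPI contract: count the packets actually received, fold them into the return value, and only complete NAPI and unmask interrupts when the budget was not exhausted. A reduced sketch of that shape; my_rx() and my_irq_enable() are placeholders:

#include <linux/netdevice.h>

static bool my_rx(struct napi_struct *napi, int *quota);	/* true if quota hit 0 */
static void my_irq_enable(struct napi_struct *napi);		/* placeholder */

static int my_poll(struct napi_struct *napi, int budget)
{
	int quota = budget;
	bool unmask = !my_rx(napi, &quota);

	if (unmask) {
		napi_complete(napi);	/* under budget: stop polling ... */
		my_irq_enable(napi);	/* ... and let interrupts resume */
	}
	return budget - quota;		/* packets consumed this round */
}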
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 475e1e8c1d35..0786eb0da391 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -50,7 +50,7 @@
 * the macros available to do this are only defined for GCC 8 and later.
*/
__diag_push();
-__diag_ignore(GCC, 8, "-Woverride-init",
+__diag_ignore_all("-Woverride-init",
"logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
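__diag_ignore_all() drops the compiler-version argument because it applies the suppression on every compiler that supports the pragma, which is what this initialize-all-then-override table wants. The bracket in miniature, with an illustrative table:

__diag_push();
__diag_ignore_all("-Woverride-init",
		  "initialize all entries, then override a few");
static const int my_table[8] = {
	[0 ... 7] = -1,	/* default every slot */
	[3] = 42,	/* deliberate override; would trip -Woverride-init */
};
__diag_pop();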
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index a6fefe675ef1..3b7d4ac1e7be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -553,6 +553,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
+ u32 caps;
u32 speed_mask;
u32 speed10;
u32 speed100;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index b21d99faa2d0..e1b761dcfa1d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
/* The loopback bit seems to be re-set when link change
* Simply mask it each time
* Speed 10/100/1000 are set in BIT(2)/BIT(3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 3927609abc44..8555299443f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed10 = GMAC_CONTROL_PS;
mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index a6e8d7bd9588..7667d103cd0e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
dev_info(priv->device, "\tDWMAC100\n");
mac->pcsr = priv->ioaddr;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed10 = 0;
mac->link.speed100 = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 6b6d0de09619..a38226d7cc6a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -70,7 +70,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
{
- priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ else
+ priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
}
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -92,19 +95,41 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
u32 prio, u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
- u32 base_register;
- u32 value;
+ u32 clear_mask = 0;
+ u32 ctrl2, ctrl3;
+ int i;
- base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
- if (queue >= 4)
- queue -= 4;
+ ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
+ ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);
- value = readl(ioaddr + base_register);
+ /* The software must ensure that the same priority
+ * is not mapped to multiple Rx queues
+ */
+ for (i = 0; i < 4; i++)
+ clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
+ GMAC_RXQCTRL_PSRQX_MASK(i));
- value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
- value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ ctrl2 &= ~clear_mask;
+ ctrl3 &= ~clear_mask;
+
+ /* First assign new priorities to a queue, then
+	 * clear them from other queues
+ */
+ if (queue < 4) {
+ ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
GMAC_RXQCTRL_PSRQX_MASK(queue);
- writel(value, ioaddr + base_register);
+
+ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+ } else {
+ queue -= 4;
+
+ ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ GMAC_RXQCTRL_PSRQX_MASK(queue);
+
+ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+ }
}
static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
@@ -1356,6 +1381,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
mac->link.duplex = GMAC_CONFIG_DM;
mac->link.speed10 = GMAC_CONFIG_PS;
mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
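The GMAC4 and XGMAC variants of this fix (see the XGMAC hunk further down) share one idea: since a priority may map to at most one RX queue, build a mask covering that priority in every per-queue field, clear it from both control registers, then set it only in the target field. Condensed, with illustrative field macros:

#include <linux/bits.h>
#include <linux/types.h>

#define MY_PSRQ_SHIFT(x)	((x) * 8)
#define MY_PSRQ_MASK(x)		(GENMASK(7, 0) << MY_PSRQ_SHIFT(x))

static void my_rx_queue_prio(u32 *ctrl2, u32 *ctrl3, u32 prio, u32 queue)
{
	u32 *target = queue < 4 ? ctrl2 : ctrl3;
	u32 clear_mask = 0;
	int i;

	if (queue >= 4)
		queue -= 4;

	for (i = 0; i < 4; i++)		/* prio bits of all 4 fields per register */
		clear_mask |= (prio << MY_PSRQ_SHIFT(i)) & MY_PSRQ_MASK(i);

	*ctrl2 &= ~clear_mask;		/* drop the priority everywhere ... */
	*ctrl3 &= ~clear_mask;
	*target |= (prio << MY_PSRQ_SHIFT(queue)) & MY_PSRQ_MASK(queue);
}

The driver additionally orders the MMIO write-backs so the register gaining the priority is written before the one losing it.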
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 1af2f89a0504..f8e7775bb633 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
-static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
-{
- priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
- MAC_10000FD | MAC_25000FD |
- MAC_40000FD | MAC_50000FD |
- MAC_100000FD;
-}
-
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -105,17 +97,41 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value, reg;
+ u32 clear_mask = 0;
+ u32 ctrl2, ctrl3;
+ int i;
- reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
- if (queue >= 4)
+ ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
+ ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);
+
+ /* The software must ensure that the same priority
+ * is not mapped to multiple Rx queues
+ */
+ for (i = 0; i < 4; i++)
+ clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
+ XGMAC_PSRQ(i));
+
+ ctrl2 &= ~clear_mask;
+ ctrl3 &= ~clear_mask;
+
+ /* First assign new priorities to a queue, then
+	 * clear them from other queues
+ */
+ if (queue < 4) {
+ ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+ XGMAC_PSRQ(queue);
+
+ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+ } else {
queue -= 4;
- value = readl(ioaddr + reg);
- value &= ~XGMAC_PSRQ(queue);
- value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+ ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+ XGMAC_PSRQ(queue);
- writel(value, ioaddr + reg);
+ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+ }
}
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
@@ -1516,7 +1532,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1577,7 +1592,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
const struct stmmac_ops dwxlgmac2_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1637,6 +1651,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1674,6 +1691,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD | MAC_25000FD |
+ MAC_40000FD | MAC_50000FD |
+ MAC_100000FD;
mac->link.duplex = 0;
mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index dff02d75d519..5d1ea3e07459 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -52,6 +52,7 @@ struct stmmac_counters {
unsigned int mmc_tx_excessdef;
unsigned int mmc_tx_pause_frame;
unsigned int mmc_tx_vlan_frame_g;
+ unsigned int mmc_tx_oversize_g;
unsigned int mmc_tx_lpi_usec;
unsigned int mmc_tx_lpi_tran;
@@ -80,6 +81,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fifo_overflow;
unsigned int mmc_rx_vlan_frames_gb;
unsigned int mmc_rx_watchdog_error;
+ unsigned int mmc_rx_error;
unsigned int mmc_rx_lpi_usec;
unsigned int mmc_rx_lpi_tran;
unsigned int mmc_rx_discard_frames_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 7eb477faa75a..0fab842902a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -53,6 +53,7 @@
#define MMC_TX_EXCESSDEF 0x6c
#define MMC_TX_PAUSE_FRAME 0x70
#define MMC_TX_VLAN_FRAME_G 0x74
+#define MMC_TX_OVERSIZE_G 0x78
/* MMC RX counter registers */
#define MMC_RX_FRAMECOUNT_GB 0x80
@@ -79,6 +80,13 @@
#define MMC_RX_FIFO_OVERFLOW 0xd4
#define MMC_RX_VLAN_FRAMES_GB 0xd8
#define MMC_RX_WATCHDOG_ERROR 0xdc
+#define MMC_RX_ERROR 0xe0
+
+#define MMC_TX_LPI_USEC 0xec
+#define MMC_TX_LPI_TRAN 0xf0
+#define MMC_RX_LPI_USEC 0xf4
+#define MMC_RX_LPI_TRAN 0xf8
+
/* IPC*/
#define MMC_RX_IPC_INTR_MASK 0x100
#define MMC_RX_IPC_INTR 0x108
@@ -283,6 +291,9 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
+ mmc->mmc_tx_oversize_g += readl(mmcaddr + MMC_TX_OVERSIZE_G);
+ mmc->mmc_tx_lpi_usec += readl(mmcaddr + MMC_TX_LPI_USEC);
+ mmc->mmc_tx_lpi_tran += readl(mmcaddr + MMC_TX_LPI_TRAN);
/* MMC RX counter registers */
mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
@@ -316,6 +327,10 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
+ mmc->mmc_rx_error += readl(mmcaddr + MMC_RX_ERROR);
+ mmc->mmc_rx_lpi_usec += readl(mmcaddr + MMC_RX_LPI_USEC);
+ mmc->mmc_rx_lpi_tran += readl(mmcaddr + MMC_RX_LPI_TRAN);
+
/* IPv4 */
mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e1537a57815f..542e2633a6f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -212,6 +212,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_tx_excessdef),
STMMAC_MMC_STAT(mmc_tx_pause_frame),
STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_tx_oversize_g),
STMMAC_MMC_STAT(mmc_tx_lpi_usec),
STMMAC_MMC_STAT(mmc_tx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_framecount_gb),
@@ -238,6 +239,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_error),
STMMAC_MMC_STAT(mmc_rx_lpi_usec),
STMMAC_MMC_STAT(mmc_rx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 24cd80490d19..7c6fb14b5555 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1198,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
-static void stmmac_set_half_duplex(struct stmmac_priv *priv)
-{
- /* Half-Duplex can only work with single tx queue */
- if (priv->plat->tx_queues_to_use > 1)
- priv->phylink_config.mac_capabilities &=
- ~(MAC_10HD | MAC_100HD | MAC_1000HD);
- else
- priv->phylink_config.mac_capabilities |=
- (MAC_10HD | MAC_100HD | MAC_1000HD);
-}
-
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1236,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
xpcs_get_interfaces(priv->hw->xpcs,
priv->phylink_config.supported_interfaces);
- priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10FD | MAC_100FD |
- MAC_1000FD;
-
- stmmac_set_half_duplex(priv);
-
/* Get the MAC specific capabilities */
stmmac_mac_phylink_get_caps(priv);
+ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
max_speed = priv->plat->max_speed;
if (max_speed)
phylink_limit_mac_speed(&priv->phylink_config, max_speed);
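After this refactor the capability flow is: each dwmacX setup() seeds hw->link.caps, phylink_get_caps() only adjusts it (e.g. half duplex requires a single TX queue), and the core copies the result into phylink and clamps it by the platform's max speed. The two call sites (here and in stmmac_reinit_queues() below) could plausibly share a helper along these lines — a sketch, not code from the patch:

static void my_refresh_phylink_caps(struct stmmac_priv *priv)
{
	/* let the MAC adjust hw->link.caps for the current config */
	stmmac_mac_phylink_get_caps(priv);

	priv->phylink_config.mac_capabilities = priv->hw->link.caps;

	if (priv->plat->max_speed)
		phylink_limit_mac_speed(&priv->phylink_config,
					priv->plat->max_speed);
}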
@@ -7342,6 +7327,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret = 0, i;
+ int max_speed;
if (netif_running(dev))
stmmac_release(dev);
@@ -7355,7 +7341,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
- stmmac_set_half_duplex(priv);
+ stmmac_mac_phylink_get_caps(priv);
+
+ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
+ max_speed = priv->plat->max_speed;
+ if (max_speed)
+ phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+
stmmac_napi_add(dev);
if (netif_running(dev))
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2939a21ca74f..1d00e21808c1 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2793,6 +2793,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
+ struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
int ret = 0, i;
@@ -2805,6 +2807,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
if (ret)
return ret;
+ /* The DMA Channels are not guaranteed to be in a clean state.
+ * Reset and disable them to ensure that they are back to the
+ * clean state and ready to be used.
+ */
+ for (i = 0; i < common->tx_ch_num; i++) {
+ k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
+ am65_cpsw_nuss_rx_cleanup, !!i);
+
+ k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
+
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c66618d91c28..f89716b1cfb6 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -784,6 +784,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
+ if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
+ ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
+ (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
+ mtype_seqid = skb_cb->skb_mtype_seqid;
+
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index cf7b73f8f450..b69af69a1ccd 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -421,12 +421,14 @@ static int prueth_init_rx_chns(struct prueth_emac *emac,
if (!i)
fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
- rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (rx_chn->irq[i] <= 0) {
- ret = rx_chn->irq[i];
+ ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (ret <= 0) {
+ if (!ret)
+ ret = -ENXIO;
netdev_err(ndev, "Failed to get rx dma irq");
goto fail;
}
+ rx_chn->irq[i] = ret;
}
return 0;
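The hunk above also normalizes the "no IRQ" case: IRQ getters can return 0, which must neither be stored as a valid vector nor returned as success. The guard pattern, reduced; the struct and helper names are illustrative:

#include <linux/errno.h>
#include <linux/dma/k3-udma-glue.h>

struct my_rx_chn {				/* illustrative */
	struct k3_udma_glue_rx_channel *rx_chn;
	int irq[8];
};

static int my_store_rx_irq(struct my_rx_chn *chn, int i)
{
	int ret = k3_udma_glue_rx_get_irq(chn->rx_chn, i);

	if (ret <= 0)
		return ret ? ret : -ENXIO;	/* map 0 to a real errno */

	chn->irq[i] = ret;	/* only store a known-good vector */
	return 0;
}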
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 6dff2c85682d..6fae161cbcb8 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1598,7 +1598,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = {0, };
+ struct irq_affinity affd = { .pre_vectors = 1 };
int nvecs, i;
/* We start by asking for one vector per queue pair */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 5b5d5e4310d1..93295916b1d2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -571,7 +571,7 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d",
+ snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9df39cf8b097..1072e2210aed 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1443,7 +1443,7 @@ static int temac_probe(struct platform_device *pdev)
}
/* map device registers */
- lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
+ lp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
return -ENOMEM;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 2f6739fe78af..6c2835086b57 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -822,7 +822,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs4)
@@ -929,7 +929,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs6)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ba4704c2c640..e62d6cbdf9bc 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1098,11 +1098,12 @@ out_hashtable:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct hlist_node *next;
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
list_del_rcu(&gtp->list);
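The gtp_dellink() fix swaps hlist_for_each_entry_rcu() for the _safe variant because the loop body unlinks the current entry; the safe iterator caches the next node before the body runs. In isolation (the real code defers freeing via RCU, which this sketch skips):

#include <linux/list.h>
#include <linux/slab.h>

struct my_ctx {
	struct hlist_node hlist;
	/* ... */
};

static void my_flush(struct hlist_head *head)
{
	struct my_ctx *ctx;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ctx, next, head, hlist) {
		hlist_del(&ctx->hlist);	/* safe: next already saved */
		kfree(ctx);
	}
}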
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index a6fcbda64ecc..2b6ec979a62f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -154,8 +154,11 @@ static void free_netvsc_device(struct rcu_head *head)
int i;
kfree(nvdev->extension);
- vfree(nvdev->recv_buf);
- vfree(nvdev->send_buf);
+
+ if (!nvdev->recv_buf_gpadl_handle.decrypted)
+ vfree(nvdev->recv_buf);
+ if (!nvdev->send_buf_gpadl_handle.decrypted)
+ vfree(nvdev->send_buf);
bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0206b84284ab..ff016c11b4a0 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -999,10 +999,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
+ bool is_macsec_md_dst;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
md_dst = skb_metadata_dst(skb);
+ is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1013,14 +1015,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
- struct macsec_rx_sc *rx_sc = NULL;
+ const struct macsec_ops *ops;
- if (md_dst && md_dst->type == METADATA_MACSEC)
- rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);
+ ops = macsec_get_ops(macsec, NULL);
- if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
+ if (ops->rx_uses_md_dst && !is_macsec_md_dst)
continue;
+ if (is_macsec_md_dst) {
+ struct macsec_rx_sc *rx_sc;
+
+ /* All drivers that implement MACsec offload
+ * support using skb metadata destinations must
+ * indicate that they do so.
+ */
+ DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
+ rx_sc = find_rx_sc(&macsec->secy,
+ md_dst->u.macsec_info.sci);
+ if (!rx_sc)
+ continue;
+ /* device indicated macsec offload occurred */
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ eth_skb_pkt_type(skb, ndev);
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
+ }
+
+ /* This datapath is insecure because it is unable to
+ * enforce isolation of broadcast/multicast traffic and
+ * unicast traffic with promiscuous mode on the macsec
+ * netdev. Since the core stack has no mechanism to
+ * check that the hardware did indeed receive MACsec
+ * traffic, it is possible that the response handling
+ * done by the MACsec port was to a plaintext packet.
+ * This violates the MACsec protocol standard.
+ */
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1036,14 +1066,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
break;
nskb->dev = ndev;
- if (ether_addr_equal_64bits(hdr->h_dest,
- ndev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
- else
- nskb->pkt_type = PACKET_MULTICAST;
+ eth_skb_pkt_type(nskb, ndev);
__netif_rx(nskb);
- } else if (rx_sc || ndev->flags & IFF_PROMISC) {
+ } else if (ndev->flags & IFF_PROMISC) {
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
ret = RX_HANDLER_ANOTHER;
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index fa8c6fdcf301..d7aaefb5226b 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -695,7 +695,8 @@ static int dp83869_configure_mode(struct phy_device *phydev,
phy_ctrl_val = dp83869->mode;
if (phydev->interface == PHY_INTERFACE_MODE_MII) {
if (dp83869->mode == DP83869_100M_MEDIA_CONVERT ||
- dp83869->mode == DP83869_RGMII_100_BASE) {
+ dp83869->mode == DP83869_RGMII_100_BASE ||
+ dp83869->mode == DP83869_RGMII_COPPER_ETHERNET) {
phy_ctrl_val |= DP83869_OP_MODE_MII;
} else {
phydev_err(phydev, "selected op-mode is not valid with MII mode\n");
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
index 0f3a1538a8b8..f4f9412d0cd7 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek-ge-soc.c
@@ -216,6 +216,9 @@
#define MTK_PHY_LED_ON_LINK1000 BIT(0)
#define MTK_PHY_LED_ON_LINK100 BIT(1)
#define MTK_PHY_LED_ON_LINK10 BIT(2)
+#define MTK_PHY_LED_ON_LINK (MTK_PHY_LED_ON_LINK10 |\
+ MTK_PHY_LED_ON_LINK100 |\
+ MTK_PHY_LED_ON_LINK1000)
#define MTK_PHY_LED_ON_LINKDOWN BIT(3)
#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
@@ -231,6 +234,12 @@
#define MTK_PHY_LED_BLINK_100RX BIT(3)
#define MTK_PHY_LED_BLINK_10TX BIT(4)
#define MTK_PHY_LED_BLINK_10RX BIT(5)
+#define MTK_PHY_LED_BLINK_RX (MTK_PHY_LED_BLINK_10RX |\
+ MTK_PHY_LED_BLINK_100RX |\
+ MTK_PHY_LED_BLINK_1000RX)
+#define MTK_PHY_LED_BLINK_TX (MTK_PHY_LED_BLINK_10TX |\
+ MTK_PHY_LED_BLINK_100TX |\
+ MTK_PHY_LED_BLINK_1000TX)
#define MTK_PHY_LED_BLINK_COLLISION BIT(6)
#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
@@ -1247,11 +1256,9 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (blink < 0)
return -EIO;
- if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK10)) ||
- (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX |
- MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)))
+ if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
+ MTK_PHY_LED_ON_LINKDOWN)) ||
+ (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX)))
set_bit(bit_netdev, &priv->led_state);
else
clear_bit(bit_netdev, &priv->led_state);
@@ -1269,7 +1276,7 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (!rules)
return 0;
- if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10))
+ if (on & MTK_PHY_LED_ON_LINK)
*rules |= BIT(TRIGGER_NETDEV_LINK);
if (on & MTK_PHY_LED_ON_LINK10)
@@ -1287,10 +1294,10 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (on & MTK_PHY_LED_ON_HDX)
*rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
- if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX))
+ if (blink & MTK_PHY_LED_BLINK_RX)
*rules |= BIT(TRIGGER_NETDEV_RX);
- if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))
+ if (blink & MTK_PHY_LED_BLINK_TX)
*rules |= BIT(TRIGGER_NETDEV_TX);
return 0;
@@ -1323,15 +1330,19 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
on |= MTK_PHY_LED_ON_LINK1000;
if (rules & BIT(TRIGGER_NETDEV_RX)) {
- blink |= MTK_PHY_LED_BLINK_10RX |
- MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_1000RX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000RX : 0)) :
+ MTK_PHY_LED_BLINK_RX;
}
if (rules & BIT(TRIGGER_NETDEV_TX)) {
- blink |= MTK_PHY_LED_BLINK_10TX |
- MTK_PHY_LED_BLINK_100TX |
- MTK_PHY_LED_BLINK_1000TX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000TX : 0)) :
+ MTK_PHY_LED_BLINK_TX;
}
if (blink || on)
@@ -1344,9 +1355,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
MTK_PHY_LED0_ON_CTRL,
MTK_PHY_LED_ON_FDX |
MTK_PHY_LED_ON_HDX |
- MTK_PHY_LED_ON_LINK10 |
- MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK1000,
+ MTK_PHY_LED_ON_LINK,
on);
if (ret)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 8b8634600c51..ddb50a0e2bc8 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2431,6 +2431,7 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
+ int tx_mod;
ptp_priv->hwts_tx_type = config->tx_type;
ptp_priv->rx_filter = config->rx_filter;
@@ -2477,9 +2478,14 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
- if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
+ if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ } else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+ tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ }
if (config->rx_filter != HWTSTAMP_FILTER_NONE)
lan8814_config_ts_intr(ptp_priv->phydev, true);
@@ -2537,7 +2543,7 @@ static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
}
}
-static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+static bool lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
{
struct ptp_header *ptp_header;
u32 type;
@@ -2547,7 +2553,11 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
ptp_header = ptp_parse_header(skb, type);
skb_pull_inline(skb, ETH_HLEN);
+ if (!ptp_header)
+ return false;
+
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+ return true;
}
static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2559,7 +2569,8 @@ static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
bool ret = false;
u16 skb_sig;
- lan8814_get_sig_rx(skb, &skb_sig);
+ if (!lan8814_get_sig_rx(skb, &skb_sig))
+ return ret;
/* Iterate over all RX timestamps and match it with the received skbs */
spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
@@ -2834,7 +2845,7 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
return 0;
}
-static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
{
struct ptp_header *ptp_header;
u32 type;
@@ -2842,7 +2853,11 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
type = ptp_classify_raw(skb);
ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+ return true;
}
static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2856,7 +2871,8 @@ static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
- lan8814_get_sig_tx(skb, &skb_sig);
+ if (!lan8814_get_sig_tx(skb, &skb_sig))
+ continue;
if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
continue;
@@ -2910,7 +2926,8 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
- lan8814_get_sig_rx(skb, &skb_sig);
+ if (!lan8814_get_sig_rx(skb, &skb_sig))
+ continue;
if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
continue;
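
The rx/tx signature helpers now report whether a PTP header was actually parsed; the shape of that pattern in isolation, with an invented header type:

        #include <stdbool.h>
        #include <stdint.h>

        struct hdr { uint16_t sequence_id; };   /* hypothetical parsed header */

        /* Report failure instead of dereferencing a NULL parse result. */
        static bool get_sig(const struct hdr *h, uint16_t *sig)
        {
                if (!h)
                        return false;
                *sig = h->sequence_id;
                return true;
        }

        /* Callers skip unparseable entries rather than crash:
         *         if (!get_sig(h, &sig))
         *                 continue;
         */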
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 4717c59d51d0..e79657f76bea 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -797,7 +797,7 @@ static int at8031_parse_dt(struct phy_device *phydev)
static int at8031_probe(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
+ struct at803x_priv *priv;
int mode_cfg;
int ccr;
int ret;
@@ -806,6 +806,8 @@ static int at8031_probe(struct phy_device *phydev)
if (ret)
return ret;
+ priv = phydev->priv;
+
/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
* options.
*/
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0b3f21cba552..92da8c03d960 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2125,14 +2125,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
tun_is_little_endian(tun), true,
vlan_hlen)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
- pr_err("unexpected GSO type: "
- "0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+
+ if (net_ratelimit()) {
+ netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+ }
WARN_ON_ONCE(1);
return -EINVAL;
}
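
A crude userspace stand-in for the net_ratelimit() guard added above, assuming a one-message-per-second policy:

        #include <stdbool.h>
        #include <time.h>

        /* Allow at most one message per wall-clock second. */
        static bool ratelimit(void)
        {
                static time_t last;
                time_t now = time(NULL);

                if (now == last)
                        return false;
                last = now;
                return true;
        }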
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 88e084534853..df9d767cb524 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1273,6 +1273,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
if (is_valid_ether_addr(mac)) {
eth_hw_addr_set(dev->net, mac);
+ if (!is_local_ether_addr(mac))
+ dev->net->addr_assign_type = NET_ADDR_PERM;
} else {
netdev_info(dev->net, "invalid MAC address, using random\n");
eth_hw_addr_random(dev->net);
@@ -1315,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
netif_set_tso_max_size(dev->net, 16384);
+ ax88179_reset(dev);
+
return 0;
}
@@ -1452,21 +1456,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
- skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!ax_skb)
return 0;
- skb_trim(ax_skb, pkt_len);
-
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
+ skb_put(ax_skb, pkt_len);
+ memcpy(ax_skb->data, skb->data + 2, pkt_len);
- skb->truesize = pkt_len_plus_padd +
- SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
@@ -1693,7 +1692,6 @@ static const struct driver_info ax88179_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1706,7 +1704,6 @@ static const struct driver_info ax88178a_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
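
The rx_fixup change copies each frame into a right-sized buffer instead of cloning the whole URB buffer and trimming it; the core of that pattern in plain C:

        #include <stdlib.h>
        #include <string.h>

        /* Copy one packet out of a shared receive buffer; the 2-byte offset
         * mirrors the IP-alignment pseudo header skipped above. */
        static unsigned char *extract_pkt(const unsigned char *rx, size_t pkt_len)
        {
                unsigned char *pkt = malloc(pkt_len);

                if (!pkt)
                        return NULL;
                memcpy(pkt, rx + 2, pkt_len);
                return pkt;
        }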
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e2e181378f41..a5469cf5cf67 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1368,6 +1368,9 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
@@ -1431,6 +1434,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
{QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
+ {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c22d1118a133..115c3c5414f2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3807,6 +3807,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
+ bool update = false;
int i;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
@@ -3814,13 +3815,28 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
if (rxfh->indir) {
+ if (!vi->has_rss)
+ return -EOPNOTSUPP;
+
for (i = 0; i < vi->rss_indir_table_size; ++i)
vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+ update = true;
}
- if (rxfh->key)
+
+ if (rxfh->key) {
+ /* If either _F_HASH_REPORT or _F_RSS are negotiated, the
+ * device provides hash calculation capabilities, that is,
+ * hash_key is configured.
+ */
+ if (!vi->has_rss && !vi->has_rss_hash_report)
+ return -EOPNOTSUPP;
+
memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+ update = true;
+ }
- virtnet_commit_rss_command(vi);
+ if (update)
+ virtnet_commit_rss_command(vi);
return 0;
}
@@ -4729,13 +4745,15 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
vi->has_rss = true;
- if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_indir_table_size =
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
+ }
+
+ if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
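
A sketch of the flow the set_rxfh change adopts -- validate capabilities first, stage, and commit only when something changed; the names here are invented:

        #include <stdbool.h>
        #include <errno.h>

        struct dev { bool has_rss, has_hash_report; };

        static void commit_rss(struct dev *d) { (void)d; /* push staged state */ }

        static int set_rxfh(struct dev *d, const int *indir, const char *key)
        {
                bool update = false;

                if (indir) {
                        if (!d->has_rss)
                                return -EOPNOTSUPP;
                        update = true;      /* stage the indirection table here */
                }
                if (key) {
                        if (!d->has_rss && !d->has_hash_report)
                                return -EOPNOTSUPP;
                        update = true;      /* stage the hash key here */
                }
                if (update)
                        commit_rss(d);      /* no device I/O when nothing changed */
                return 0;
        }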
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 3495591a5c29..ba319fc21957 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1615,6 +1615,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+ return false;
+
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
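
For reference, is_valid_ether_addr() as used above accepts only unicast, non-zero addresses; a standalone equivalent:

        #include <stdbool.h>
        #include <stdint.h>
        #include <string.h>

        /* Valid source MAC: not all-zero and not multicast/broadcast
         * (the I/G bit, bit 0 of the first octet, must be clear). */
        static bool valid_ether_addr(const uint8_t a[6])
        {
                static const uint8_t zero[6];

                return !(a[0] & 1) && memcmp(a, zero, 6) != 0;
        }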
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index a6a37d67a50a..9f4bf41a3d41 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -9020,6 +9020,7 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
offload = &arvif->arp_ns_offload;
count = 0;
+ /* Note: read_lock_bh() calls rcu_read_lock() */
read_lock_bh(&idev->lock);
memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
@@ -9050,7 +9051,8 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
}
/* get anycast address */
- for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
+ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+ ifaca6 = rcu_dereference(ifaca6->aca_next)) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 072b0a5827d1..eca1457caa0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 90
+#define IWL_BZ_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 80
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 9b79279fd76c..dbbcb2d0968c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 90
+#define IWL_SC_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 82
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index db6d7013df66..c3bdf433d8f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -3081,8 +3081,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
- u32 policy;
- u32 time_point;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
@@ -3113,13 +3111,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
- policy = le32_to_cpu(dump_data->trig->apply_policy);
- time_point = le32_to_cpu(dump_data->trig->time_point);
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ u32 policy = le32_to_cpu(dump_data->trig->apply_policy);
+ u32 time_point = le32_to_cpu(dump_data->trig->time_point);
- if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
- IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
- iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ }
}
+
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 553c6fffc7c6..52518a47554e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1260,15 +1260,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (IS_ERR_OR_NULL(vif))
return 1;
- if (ieee80211_vif_is_mld(vif) && vif->cfg.assoc) {
+ if (hweight16(vif->active_links) > 1) {
/*
- * Select the 'best' link. May need to revisit, it seems
- * better to not optimize for throughput but rather range,
- * reliability and power here - and select 2.4 GHz ...
+ * Select the 'best' link.
+ * May need to revisit, it seems better to not optimize
+ * for throughput but rather range, reliability and
+ * power here - and select 2.4 GHz ...
*/
- primary_link =
- iwl_mvm_mld_get_primary_link(mvm, vif,
- vif->active_links);
+ primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ vif->active_links);
if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n",
vif->active_links))
@@ -1277,6 +1277,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
ret = ieee80211_set_active_links(vif, BIT(primary_link));
if (ret)
return ret;
+ } else if (vif->active_links) {
+ primary_link = __ffs(vif->active_links);
} else {
primary_link = 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 51b01f7528be..7fe57ecd0682 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -748,7 +748,9 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct dentry *dbgfs_dir = vif->debugfs_dir;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[100];
+ char buf[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
+ (7 + IFNAMSIZ + 1) + 6 + 1];
+ char name[7 + IFNAMSIZ + 1];
/* this will happen in monitor mode */
if (!dbgfs_dir)
@@ -761,10 +763,11 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* find
* netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
*/
- snprintf(buf, 100, "../../../%pd3/iwlmvm", dbgfs_dir);
+ snprintf(name, sizeof(name), "%pd", dbgfs_dir);
+ snprintf(buf, sizeof(buf), "../../../%pd3/iwlmvm", dbgfs_dir);
- mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
- mvm->debugfs_dir, buf);
+ mvmvif->dbgfs_slink =
+ debugfs_create_symlink(name, mvm->debugfs_dir, buf);
}
void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
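
The replaced char buf[100] is now sized from the longest string it can hold; the same compile-time technique, with assumed maxima standing in for the wiphy-name and ifname limits:

        #include <stdio.h>

        #define PHY_NAME_MAX  64    /* stand-in for NL80211_WIPHY_NAME_MAXLEN */
        #define IFNAME_MAX    16    /* stand-in for IFNAMSIZ */

        /* "../../../" + phy name + "/" + "netdev:" + ifname + "/iwlmvm" + NUL,
         * sized from its parts rather than a guessed constant. */
        static char buf[3 * 3 + PHY_NAME_MAX + 1 + 7 + IFNAME_MAX + 1 + 6 + 1];

        int main(void)
        {
                snprintf(buf, sizeof(buf), "../../../%s/netdev:%s/iwlmvm",
                         "phy0", "wlan0");
                return 0;
        }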
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 4863a3c74640..d84d7e955bb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -53,6 +53,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!pasn)
return -ENOBUFS;
+ iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
+
pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
switch (pasn->cipher) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index f13f13e6b71a..fe5bba8561d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -46,6 +46,27 @@ static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
+ link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
+ mvmvif);
+ if (link_info->fw_link_id >=
+ ARRAY_SIZE(mvm->link_id_to_link_conf))
+ return -EINVAL;
+
+ rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
+ link_conf);
+ }
+
+ return 0;
+}
+
int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -55,19 +76,14 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
+ int ret;
if (WARN_ON_ONCE(!link_info))
return -EINVAL;
- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
- link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
- mvmvif);
- if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))
- return -EINVAL;
-
- rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
- link_conf);
- }
+ ret = iwl_mvm_set_link_mapping(mvm, vif, link_conf);
+ if (ret)
+ return ret;
/* Update SF - Disable if needed. if this fails, SF might still be on
* while many macs are bound, which is forbidden - so fail the binding.
@@ -248,6 +264,25 @@ send_cmd:
return ret;
}
+int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ /* mac80211 thought we have the link, but it was never configured */
+ if (WARN_ON(!link_info ||
+ link_info->fw_link_id >=
+ ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ return -EINVAL;
+
+ RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
+ NULL);
+ iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
+ return 0;
+}
+
int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -257,15 +292,11 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
int ret;
- /* mac80211 thought we have the link, but it was never configured */
- if (WARN_ON(!link_info ||
- link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ ret = iwl_mvm_unset_link_mapping(mvm, vif, link_conf);
+ if (ret)
return 0;
- RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
- NULL);
cmd.link_id = cpu_to_le32(link_info->fw_link_id);
- iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
cmd.spec_link_id = link_conf->link_id;
cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1935630d3def..8f4b063d6243 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -360,7 +360,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
!iwlwifi_mod_params.disable_11ax &&
!iwlwifi_mod_params.disable_11be)
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
/* With MLD FW API, it tracks timing by itself,
* no need for any timing from the host
@@ -1577,8 +1577,14 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvmvif->mvm = mvm;
/* the first link always points to the default one */
+ mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+ mvmvif->deflink.active = 0;
mvmvif->link[0] = &mvmvif->deflink;
+ ret = iwl_mvm_set_link_mapping(mvm, vif, &vif->bss_conf);
+ if (ret)
+ goto out;
+
/*
* Not much to do here. The stack will not allow interface
* types or combinations that we didn't advertise, so we
@@ -1783,6 +1789,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = NULL;
}
+ iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf);
iwl_mvm_mac_ctxt_remove(mvm, vif);
RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 1628bf55458f..23e64a757cfe 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -855,10 +855,15 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id)
{
- int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
+ int ret;
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
+ return 0;
+
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
+
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 44571114fb15..f0b24f00938b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1916,11 +1916,15 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band);
/* Links */
+int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active);
+int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index 2ecd32bed752..045c862a8fc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -132,14 +132,18 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (ret)
return ERR_PTR(ret);
- if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) !=
+ resp_size)) {
+ iwl_free_resp(&cmd);
return ERR_PTR(-EIO);
+ }
resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
+ iwl_free_resp(&cmd);
+
if (!resp)
return ERR_PTR(-ENOMEM);
- iwl_free_resp(&cmd);
return resp;
}
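
The rfi change above fixes two leaks: the response is now freed on the size-mismatch path and freed immediately after duplication, so no exit path keeps it. The shape of that fix, self-contained:

        #include <stdlib.h>
        #include <string.h>

        /* Duplicate a transient response, releasing it on every path. */
        static void *consume_resp(void *resp, size_t len, size_t expected)
        {
                void *copy;

                if (len != expected) {
                        free(resp);         /* previously leaked */
                        return NULL;
                }
                copy = malloc(expected);
                if (copy)
                        memcpy(copy, resp, expected);
                free(resp);                 /* freed whether or not malloc failed */
                return copy;
        }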
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1484eaedf452..ce8d83c771a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -236,21 +236,13 @@ static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb, int queue,
- struct ieee80211_sta *sta,
- struct ieee80211_link_sta *link_sta)
+ struct ieee80211_sta *sta)
{
if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) {
kfree_skb(skb);
return;
}
- if (sta && sta->valid_links && link_sta) {
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
-
- rx_status->link_valid = 1;
- rx_status->link_id = link_sta->link_id;
- }
-
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
@@ -588,7 +580,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
- sta, NULL /* FIXME */);
+ sta);
reorder_buf->num_stored--;
}
}
@@ -2213,6 +2205,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (IS_ERR(sta))
sta = NULL;
link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]);
+
+ if (sta && sta->valid_links && link_sta) {
+ rx_status->link_valid = 1;
+ rx_status->link_id = link_sta->link_id;
+ }
}
} else if (!is_multicast_ether_addr(hdr->addr2)) {
/*
@@ -2356,8 +2353,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
!(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
rx_status->flag |= RX_FLAG_AMSDU_MORE;
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta,
- link_sta);
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
}
out:
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index f3e3986b4c72..11559563ae38 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2813,7 +2813,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (ver_handler->version != scan_ver)
continue;
- return ver_handler->handler(mvm, vif, params, type, uid);
+ err = ver_handler->handler(mvm, vif, params, type, uid);
+ return err ? : uid;
}
err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index a59d264a11c5..ad960faceb0d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -879,9 +879,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
unsigned int ver =
- iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(MAC_CONF_GROUP,
- SESSION_PROTECTION_CMD), 2);
+ iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ SESSION_PROTECTION_NOTIF, 2);
int id = le32_to_cpu(notif->mac_link_id);
struct ieee80211_vif *vif;
struct iwl_mvm_vif *mvmvif;
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 33973a60d0bf..6229c785c845 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1589,9 +1589,9 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
return;
tfd_num = iwl_txq_get_cmd_index(txq, ssn);
- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
spin_lock_bh(&txq->lock);
+ read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
if (!test_bit(txq_id, trans->txqs.queue_used)) {
IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 367459bd1345..708132d5be2a 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2233,7 +2233,7 @@ static void rtw8922a_btc_init_cfg(struct rtw89_dev *rtwdev)
* Shared-Ant && BTG-path:WL mask(0x55f), others:WL THRU(0x5ff)
*/
if (btc->ant_type == BTC_ANT_SHARED && btc->btg_pos == path)
- rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x55f);
else
rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index b55fe320633c..59e1fc0018df 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -3899,7 +3899,7 @@ static int hwsim_pmsr_report_nl(struct sk_buff *msg, struct genl_info *info)
}
nla_for_each_nested(peer, peers, rem) {
- struct cfg80211_pmsr_result result;
+ struct cfg80211_pmsr_result result = {};
err = mac80211_hwsim_parse_pmsr_result(peer, &result, info);
if (err)
diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
index 9f43f256db1d..f0a4783baf1f 100644
--- a/drivers/net/wwan/t7xx/t7xx_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
@@ -106,7 +106,7 @@ bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
{
u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
- return ioread64(hw_info->ap_pdn_base + offset);
+ return ioread64_lo_hi(hw_info->ap_pdn_base + offset);
}
void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
@@ -117,7 +117,7 @@ void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qn
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
- iowrite64(address, reg + offset);
+ iowrite64_lo_hi(address, reg + offset);
}
void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index abc41a7089fa..97163e1e5783 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -137,8 +137,9 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
return -ENODEV;
}
- gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
- queue->index * sizeof(u64));
+ gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
+ REG_CLDMA_DL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
return 0;
@@ -316,8 +317,8 @@ static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
/* Check current processing TGPD, 64-bit address is in a table by Q index */
- ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
- queue->index * sizeof(u64));
+ ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
if (req->gpd_addr != ul_curr_addr) {
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
index 76da4c15e3de..f071ec7ff23d 100644
--- a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
+++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
@@ -75,7 +75,7 @@ static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_
for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
- iowrite64(0, reg);
+ iowrite64_lo_hi(0, reg);
}
}
@@ -112,17 +112,17 @@ static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
- iowrite64(value, reg);
+ iowrite64_lo_hi(value, reg);
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
iowrite32(cfg->trsl_id, reg);
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
- iowrite64(value, reg);
+ iowrite64_lo_hi(value, reg);
/* Ensure ATR is set */
- ioread64(reg);
+ ioread64_lo_hi(reg);
return 0;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ad29f370034e..8d2aee88526c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -285,6 +285,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
return NULL;
}
skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+ skb_mark_for_recycle(skb);
/* Align ip header to a 16 bytes boundary */
skb_reserve(skb, NET_IP_ALIGN);
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 7eb17f46a815..9e1a34e23af2 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -424,7 +424,8 @@ struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
- struct regulator *regulator;
+ struct regulator *vin_regulator;
+ struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);
- ret = regulator_disable(trf->regulator);
+ ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
- trf->regulator = devm_regulator_get(&spi->dev, "vin");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
+ if (IS_ERR(trf->vin_regulator)) {
+ ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}
- uvolts = regulator_get_voltage(trf->regulator);
+ uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
- trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
+ if (IS_ERR(trf->vddio_regulator)) {
+ ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- if (regulator_get_voltage(trf->regulator) == 1800000) {
+ if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
- goto err_disable_regulator;
+ goto err_disable_vddio_regulator;
}
nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2137,8 +2138,10 @@ err_shutdown:
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
-err_disable_regulator:
- regulator_disable(trf->regulator);
+err_disable_vddio_regulator:
+ regulator_disable(trf->vddio_regulator);
+err_disable_vin_regulator:
+ regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
- regulator_disable(trf->regulator);
+ regulator_disable(trf->vddio_regulator);
+ regulator_disable(trf->vin_regulator);
mutex_destroy(&trf->lock);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 943d72bdd794..27281a9a8951 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2076,6 +2076,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
struct queue_limits lim;
struct nvme_id_ns_nvm *nvm = NULL;
+ struct nvme_zone_info zi = {};
struct nvme_id_ns *id;
sector_t capacity;
unsigned lbaf;
@@ -2088,9 +2089,10 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (id->ncap == 0) {
/* namespace not allocated or attached */
info->is_removed = true;
- ret = -ENODEV;
+ ret = -ENXIO;
goto out;
}
+ lbaf = nvme_lbaf_index(id->flbas);
if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
@@ -2098,8 +2100,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
goto out;
}
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_query_zone_info(ns, lbaf, &zi);
+ if (ret < 0)
+ goto out;
+ }
+
blk_mq_freeze_queue(ns->disk->queue);
- lbaf = nvme_lbaf_index(id->flbas);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
@@ -2112,13 +2120,8 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
capacity = 0;
nvme_config_discard(ns, &lim);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- ns->head->ids.csi == NVME_CSI_ZNS) {
- ret = nvme_update_zone_info(ns, lbaf, &lim);
- if (ret) {
- blk_mq_unfreeze_queue(ns->disk->queue);
- goto out;
- }
- }
+ ns->head->ids.csi == NVME_CSI_ZNS)
+ nvme_update_zone_info(ns, &lim, &zi);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
if (ret) {
blk_mq_unfreeze_queue(ns->disk->queue);
@@ -2201,6 +2204,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
}
if (!ret && nvme_ns_head_multipath(ns->head)) {
+ struct queue_limits *ns_lim = &ns->disk->queue->limits;
struct queue_limits lim;
blk_mq_freeze_queue(ns->head->disk->queue);
@@ -2212,7 +2216,26 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
+ /*
+ * queue_limits mixes values that are the hardware limitations
+ * for bio splitting with what is the device configuration.
+ *
+ * For NVMe the device configuration can change after e.g. a
+ * Format command, and we really want to pick up the new format
+ * value here. But we must still stack the queue limits to the
+ * least common denominator for multipathing to split the bios
+ * properly.
+ *
+ * To work around this, we explicitly set the device
+ * configuration to those that we just queried, but only stack
+ * the splitting limits in to make sure we still obey possibly
+ * lower limitations of other controllers.
+ */
lim = queue_limits_start_update(ns->head->disk->queue);
+ lim.logical_block_size = ns_lim->logical_block_size;
+ lim.physical_block_size = ns_lim->physical_block_size;
+ lim.io_min = ns_lim->io_min;
+ lim.io_opt = ns_lim->io_opt;
queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
ns->head->disk->disk_name);
ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 68a5d971657b..a5b29e9ad342 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2428,7 +2428,7 @@ nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
* controller. Called after last nvme_put_ctrl() call
*/
static void
-nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
+nvme_fc_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
@@ -3384,7 +3384,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
- .free_ctrl = nvme_fc_nvme_ctrl_freed,
+ .free_ctrl = nvme_fc_free_ctrl,
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_delete_ctrl,
.get_address = nvmf_get_address,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 24193fcb8bd5..d0ed64dc7380 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1036,10 +1036,18 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
}
#endif /* CONFIG_NVME_MULTIPATH */
+struct nvme_zone_info {
+ u64 zone_size;
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
+};
+
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
- struct queue_limits *lim);
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct nvme_zone_info *zi);
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+ struct nvme_zone_info *zi);
#ifdef CONFIG_BLK_DEV_ZONED
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd,
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 722384bcc765..77aa0f440a6d 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -35,8 +35,8 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
return 0;
}
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
- struct queue_limits *lim)
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct nvme_zone_info *zi)
{
struct nvme_effects_log *log = ns->head->effects;
struct nvme_command c = { };
@@ -89,27 +89,34 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
goto free_data;
}
- ns->head->zsze =
- nvme_lba_to_sect(ns->head, le64_to_cpu(id->lbafe[lbaf].zsze));
- if (!is_power_of_2(ns->head->zsze)) {
+ zi->zone_size = le64_to_cpu(id->lbafe[lbaf].zsze);
+ if (!is_power_of_2(zi->zone_size)) {
dev_warn(ns->ctrl->device,
- "invalid zone size:%llu for namespace:%u\n",
- ns->head->zsze, ns->head->ns_id);
+ "invalid zone size: %llu for namespace: %u\n",
+ zi->zone_size, ns->head->ns_id);
status = -ENODEV;
goto free_data;
}
+ zi->max_open_zones = le32_to_cpu(id->mor) + 1;
+ zi->max_active_zones = le32_to_cpu(id->mar) + 1;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
- lim->zoned = 1;
- lim->max_open_zones = le32_to_cpu(id->mor) + 1;
- lim->max_active_zones = le32_to_cpu(id->mar) + 1;
- lim->chunk_sectors = ns->head->zsze;
- lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
free_data:
kfree(id);
return status;
}
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+ struct nvme_zone_info *zi)
+{
+ lim->zoned = 1;
+ lim->max_open_zones = zi->max_open_zones;
+ lim->max_active_zones = zi->max_active_zones;
+ lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+ lim->chunk_sectors = ns->head->zsze =
+ nvme_lba_to_sect(ns->head, zi->zone_size);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+}
+
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
unsigned int nr_zones, size_t *buflen)
{
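
The zns rework splits a fallible query from an infallible apply, so nothing can fail while the queue is frozen; the two-phase shape with invented types and illustrative values:

        struct zone_info { unsigned long long zone_size; unsigned int max_open, max_active; };
        struct limits    { unsigned long long chunk;     unsigned int max_open, max_active; };

        /* Phase 1: may fail, performs the I/O, touches no shared state. */
        static int query_zone_info(struct zone_info *zi)
        {
                zi->zone_size = 1ULL << 19;         /* illustrative values */
                zi->max_open = zi->max_active = 14;
                return 0;
        }

        /* Phase 2: cannot fail; only copies the cached answers into the limits. */
        static void apply_zone_info(struct limits *lim, const struct zone_info *zi)
        {
                lim->chunk      = zi->zone_size;
                lim->max_open   = zi->max_open;
                lim->max_active = zi->max_active;
        }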
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 77a6e817b315..a2325330bf22 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1613,6 +1613,11 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
return ERR_PTR(-EINVAL);
}
+ if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) {
+ pr_err("can't create subsystem using unique discovery NQN\n");
+ return ERR_PTR(-EINVAL);
+ }
+
subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
if (IS_ERR(subsys))
return ERR_CAST(subsys);
@@ -2159,7 +2164,49 @@ static const struct config_item_type nvmet_hosts_type = {
static struct config_group nvmet_hosts_group;
+static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
+}
+
+static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct list_head *entry;
+ size_t len;
+
+ len = strcspn(page, "\n");
+ if (!len || len > NVMF_NQN_FIELD_LEN - 1)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ list_for_each(entry, &nvmet_subsystems_group.cg_children) {
+ struct config_item *item =
+ container_of(entry, struct config_item, ci_entry);
+
+ if (!strncmp(config_item_name(item), page, len)) {
+ pr_err("duplicate NQN %s\n", config_item_name(item));
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+ }
+ }
+ memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
+ memcpy(nvmet_disc_subsys->subsysnqn, page, len);
+ up_write(&nvmet_config_sem);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_root_, discovery_nqn);
+
+static struct configfs_attribute *nvmet_root_attrs[] = {
+ &nvmet_root_attr_discovery_nqn,
+ NULL,
+};
+
static const struct config_item_type nvmet_root_type = {
+ .ct_attrs = nvmet_root_attrs,
.ct_owner = THIS_MODULE,
};
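
configfs stores arrive newline-terminated; strcspn() above measures the NQN without the newline and doubles as the length check. In isolation, with an assumed field size:

        #include <stdio.h>
        #include <string.h>

        #define FIELD_LEN 256   /* stand-in for NVMF_NQN_FIELD_LEN */

        static int check_nqn(const char *page)
        {
                size_t len = strcspn(page, "\n");   /* bytes before the newline */

                if (!len || len > FIELD_LEN - 1)
                        return -1;
                printf("accepting %.*s\n", (int)len, page);
                return 0;
        }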
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6bbe4df0166c..8860a3eb71ec 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1541,6 +1541,13 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
}
down_read(&nvmet_config_sem);
+ if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
+ NVMF_NQN_SIZE)) {
+ if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
+ up_read(&nvmet_config_sem);
+ return nvmet_disc_subsys;
+ }
+ }
list_for_each_entry(p, &port->subsystems, entry) {
if (!strncmp(p->subsys->subsysnqn, subsysnqn,
NVMF_NQN_SIZE)) {
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index fd229f310c93..337ee1cb09ae 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1115,16 +1115,21 @@ nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
}
static bool
-nvmet_fc_assoc_exits(struct nvmet_fc_tgtport *tgtport, u64 association_id)
+nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
{
struct nvmet_fc_tgt_assoc *a;
+ bool found = false;
+ rcu_read_lock();
list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
- if (association_id == a->association_id)
- return true;
+ if (association_id == a->association_id) {
+ found = true;
+ break;
+ }
}
+ rcu_read_unlock();
- return false;
+ return found;
}
static struct nvmet_fc_tgt_assoc *
@@ -1164,13 +1169,11 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
ran = ran << BYTES_FOR_QID_SHIFT;
spin_lock_irqsave(&tgtport->lock, flags);
- rcu_read_lock();
- if (!nvmet_fc_assoc_exits(tgtport, ran)) {
+ if (!nvmet_fc_assoc_exists(tgtport, ran)) {
assoc->association_id = ran;
list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
done = true;
}
- rcu_read_unlock();
spin_unlock_irqrestore(&tgtport->lock, flags);
} while (!done);
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 3bf27052832f..4d57a4e34105 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "OF: " fmt
+#include <linux/device.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -667,6 +668,17 @@ void of_changeset_destroy(struct of_changeset *ocs)
{
struct of_changeset_entry *ce, *cen;
+ /*
+ * When a device is deleted, the device links to/from it are also queued
+ * for deletion. Until these device links are freed, the devices
+ * themselves aren't freed. If the device being deleted is due to an
+ * overlay change, this device might be holding a reference to a device
+ * node that will be freed. So, wait until all already pending device
+ * links are deleted before freeing a device node. This ensures we don't
+ * free any device node that has a non-zero reference count.
+ */
+ device_link_wait_removal();
+
list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node)
__of_changeset_entry_destroy(ce);
}
diff --git a/drivers/of/module.c b/drivers/of/module.c
index 0e8aa974f0f2..f58e624953a2 100644
--- a/drivers/of/module.c
+++ b/drivers/of/module.c
@@ -16,6 +16,14 @@ ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len)
ssize_t csize;
ssize_t tsize;
+ /*
+ * Prevent a kernel oops in vsnprintf() -- it only allows passing a
+ * NULL ptr when the length is also 0. Also filter out the negative
+ * lengths...
+ */
+ if ((len > 0 && !str) || len < 0)
+ return -EINVAL;
+
/* Name & Type */
/* %p eats all alphanum characters, so %c must be used here */
csize = snprintf(str, len, "of:N%pOFn%c%s", np, 'T',
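
The guard reflects the C snprintf() contract: a NULL buffer is only legal together with a zero size (the call then just reports the needed length), and a negative length is never a valid size. For example:

        #include <stdio.h>

        int main(void)
        {
                /* Legal: NULL with size 0 computes the required length. */
                int need = snprintf(NULL, 0, "of:N%sT%s", "node", "type");

                printf("would need %d bytes\n", need);
                return 0;   /* NULL with a nonzero size would be undefined */
        }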
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bf4833221816..eff7f5df08e2 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3766,14 +3766,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
/*
- * Apparently the LSI / Agere FW643 can't recover after a Secondary Bus
- * Reset and requires a power-off or suspend/resume and rescan. Prevent
- * use of that reset.
- */
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5900, quirk_no_bus_reset);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5901, quirk_no_bus_reset);
-
-/*
* Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
* automatically disables LTSSM when Secondary Bus Reset is received and
* the device stops working. Prevent bus reset for these devices. With
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index c78a6fd6c57f..b4efdddb2ad9 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -313,6 +313,10 @@ static int riscv_pmu_event_init(struct perf_event *event)
u64 event_config = 0;
uint64_t cmask;
+ /* driver does not support branch stack sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
hwc->flags = 0;
mapped_event = rvpmu->event_map(event, &event_config);
if (mapped_event < 0) {
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index b700f52b7b67..11fcb1867118 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -110,8 +110,10 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
/* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
- writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
- imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ if (imx8_phy->drvdata->variant != IMX8MM) {
+ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ }
val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
writel(val | ANA_AUX_RX_TERM_GND_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 41162d7228c9..1d1db1737422 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -603,7 +603,7 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
u16 val;
fix_idx = 0;
- for (addr = 0; addr < 512; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) {
/*
* All PHY register values are defined in full for 3.125Gbps
* SERDES speed. The values required for 1.25 Gbps are almost
@@ -611,11 +611,12 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
* comparison to 3.125 Gbps values. These register values are
* stored in "gbe_phy_init_fix" array.
*/
- if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) {
+ if (!is_1gbps &&
+ fix_idx < ARRAY_SIZE(gbe_phy_init_fix) &&
+ gbe_phy_init_fix[fix_idx].addr == addr) {
/* Use new value */
val = gbe_phy_init_fix[fix_idx].value;
- if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix))
- fix_idx++;
+ fix_idx++;
} else {
val = gbe_phy_init[addr];
}
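
The fix moves the bounds test before the table access; a standalone walk over a base table with a sparse fixup table shows the corrected order:

        #include <stddef.h>
        #include <stdint.h>

        struct fix { size_t addr; uint16_t value; };

        static const uint16_t base[] = { 0x10, 0x20, 0x30, 0x40 };
        static const struct fix fixes[] = { { 1, 0x21 }, { 3, 0x41 } };

        static void walk(int use_fixes)
        {
                size_t fix_idx = 0;

                for (size_t addr = 0; addr < sizeof(base) / sizeof(base[0]); addr++) {
                        uint16_t val;

                        /* Check fix_idx *before* reading fixes[fix_idx]. */
                        if (use_fixes &&
                            fix_idx < sizeof(fixes) / sizeof(fixes[0]) &&
                            fixes[fix_idx].addr == addr)
                                val = fixes[fix_idx++].value;
                        else
                                val = base[addr];
                        (void)val;  /* the real driver writes val to the PHY here */
                }
        }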
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index 03fb0d4b75d7..20d4c020a83c 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -297,7 +297,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(qphy->phy),
"failed to create phy\n");
- qphy->vreg = devm_regulator_get(dev, "vdda-phy");
+ qphy->vreg = devm_regulator_get(dev, "vdd");
if (IS_ERR(qphy->vreg))
return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7d585a4bbbba..c21cdb8dbfe7 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -77,6 +77,7 @@ enum qphy_reg_layout {
QPHY_COM_BIAS_EN_CLKBUFLR_EN,
QPHY_DP_PHY_STATUS,
+ QPHY_DP_PHY_VCO_DIV,
QPHY_TX_TX_POL_INV,
QPHY_TX_TX_DRV_LVL,
@@ -102,6 +103,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V3_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V3_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V3_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V3_TX_TX_DRV_LVL,
@@ -126,6 +128,7 @@ static const unsigned int qmp_v45_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V4_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V4_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V4_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V4_TX_TX_DRV_LVL,
@@ -150,6 +153,7 @@ static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V5_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V5_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V5_5NM_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V5_5NM_TX_TX_DRV_LVL,
@@ -174,6 +178,7 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V6_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V6_TX_TX_DRV_LVL,
@@ -2150,9 +2155,9 @@ static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
if (reverse)
- writel(0x4c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x4c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
else
- writel(0x5c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x5c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
return reverse;
}
@@ -2162,6 +2167,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 phy_vco_div;
unsigned long pixel_freq;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
switch (dp_opts->link_rate) {
case 1620:
@@ -2184,7 +2190,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
/* Other link rates aren't supported */
return -EINVAL;
}
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
+ writel(phy_vco_div, qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_VCO_DIV]);
clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
index f5cfacf9be96..181057421c11 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V5_H_
/* Only for QMP V5 PHY - DP PHY registers */
+#define QSERDES_V5_DP_PHY_VCO_DIV 0x070
#define QSERDES_V5_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
#define QSERDES_V5_DP_PHY_STATUS 0x0dc
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
index 01a20d3be4b8..fa967a1af058 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V6_H_
/* Only for QMP V6 PHY - DP PHY registers */
+#define QSERDES_V6_DP_PHY_VCO_DIV 0x070
#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
#define QSERDES_V6_DP_PHY_STATUS 0x0e4
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index a34f67bb7e61..b60a4b60451e 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -87,6 +87,7 @@ config PHY_ROCKCHIP_SAMSUNG_HDPTX
tristate "Rockchip Samsung HDMI/eDP Combo PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
select GENERIC_PHY
+ select RATIONAL
help
Enable this to support the Rockchip HDMI/eDP Combo PHY
with Samsung IP block.
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 76b9cf417591..bf74e429ff46 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -125,12 +125,15 @@ struct rockchip_combphy_grfcfg {
};
struct rockchip_combphy_cfg {
+ unsigned int num_phys;
+ unsigned int phy_ids[3];
const struct rockchip_combphy_grfcfg *grfcfg;
int (*combphy_cfg)(struct rockchip_combphy_priv *priv);
};
struct rockchip_combphy_priv {
u8 type;
+ int id;
void __iomem *mmio;
int num_clks;
struct clk_bulk_data *clks;
@@ -320,7 +323,7 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
struct rockchip_combphy_priv *priv;
const struct rockchip_combphy_cfg *phy_cfg;
struct resource *res;
- int ret;
+ int ret, id;
phy_cfg = of_device_get_match_data(dev);
if (!phy_cfg) {
@@ -338,6 +341,15 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return ret;
}
+ /* find the phy-id from the io address */
+ priv->id = -ENODEV;
+ for (id = 0; id < phy_cfg->num_phys; id++) {
+ if (res->start == phy_cfg->phy_ids[id]) {
+ priv->id = id;
+ break;
+ }
+ }
+
priv->dev = dev;
priv->type = PHY_NONE;
priv->cfg = phy_cfg;
@@ -562,6 +574,12 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfe820000,
+ 0xfe830000,
+ 0xfe840000,
+ },
.grfcfg = &rk3568_combphy_grfcfgs,
.combphy_cfg = rk3568_combphy_cfg,
};
@@ -578,8 +596,14 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ switch (priv->id) {
+ case 1:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
+ break;
+ case 2:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ break;
+ }
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
@@ -736,6 +760,12 @@ static const struct rockchip_combphy_grfcfg rk3588_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfee00000,
+ 0xfee10000,
+ 0xfee20000,
+ },
.grfcfg = &rk3588_combphy_grfcfgs,
.combphy_cfg = rk3588_combphy_cfg,
};
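Note: since the naneng combo PHY instances are otherwise indistinguishable, the probe above recovers the instance index by matching the MMIO base against a per-SoC address table. The same pattern in isolation (hypothetical names, a sketch rather than the driver's code):

#include <linux/ioport.h>
#include <linux/errno.h>

struct phy_addr_table {
	unsigned int num_phys;
	resource_size_t bases[3];
};

/* Return the index whose base matches @res->start, or -ENODEV. */
static int phy_index_from_resource(const struct phy_addr_table *tbl,
				   const struct resource *res)
{
	unsigned int i;

	for (i = 0; i < tbl->num_phys; i++) {
		if (res->start == tbl->bases[i])
			return i;
	}
	return -ENODEV;
}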
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
index 121e5961ce11..9857ee45b89e 100644
--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -40,6 +40,8 @@
#define RK3588_BIFURCATION_LANE_0_1 BIT(0)
#define RK3588_BIFURCATION_LANE_2_3 BIT(1)
#define RK3588_LANE_AGGREGATION BIT(2)
+#define RK3588_PCIE1LN_SEL_EN (GENMASK(1, 0) << 16)
+#define RK3588_PCIE30_PHY_MODE_EN (GENMASK(2, 0) << 16)
struct rockchip_p3phy_ops;
@@ -132,7 +134,7 @@ static const struct rockchip_p3phy_ops rk3568_ops = {
static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
{
u32 reg = 0;
- u8 mode = 0;
+ u8 mode = RK3588_LANE_AGGREGATION; /* default */
int ret;
/* Deassert PCIe PMA output clamp mode */
@@ -140,31 +142,24 @@ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
/* Set bifurcation if needed */
for (int i = 0; i < priv->num_lanes; i++) {
- if (!priv->lanes[i])
- mode |= (BIT(i) << 3);
-
if (priv->lanes[i] > 1)
- mode |= (BIT(i) >> 1);
- }
-
- if (!mode)
- reg = RK3588_LANE_AGGREGATION;
- else {
- if (mode & (BIT(0) | BIT(1)))
- reg |= RK3588_BIFURCATION_LANE_0_1;
-
- if (mode & (BIT(2) | BIT(3)))
- reg |= RK3588_BIFURCATION_LANE_2_3;
+ mode &= ~RK3588_LANE_AGGREGATION;
+ if (priv->lanes[i] == 3)
+ mode |= RK3588_BIFURCATION_LANE_0_1;
+ if (priv->lanes[i] == 4)
+ mode |= RK3588_BIFURCATION_LANE_2_3;
}
- regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
+ reg = mode;
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0,
+ RK3588_PCIE30_PHY_MODE_EN | reg);
/* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
if (!IS_ERR(priv->pipe_grf)) {
- reg = (mode & (BIT(6) | BIT(7))) >> 6;
+ reg = mode & (RK3588_BIFURCATION_LANE_0_1 | RK3588_BIFURCATION_LANE_2_3);
if (reg)
regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
- (reg << 16) | reg);
+ RK3588_PCIE1LN_SEL_EN | reg);
}
reset_control_deassert(priv->p30phy);
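Note: the new *_EN constants follow the Rockchip GRF convention, where the upper 16 bits of a write are a write-enable mask for the lower 16 bits, so unrelated fields in the same register are left untouched. A one-line helper illustrating the encoding (a sketch of the convention, not code from this driver):

#include <linux/bits.h>
#include <linux/types.h>

/* Update only the bits in @mask: the mask goes in the high halfword. */
static inline u32 grf_field(u32 mask, u32 val)
{
	return (mask << 16) | (val & mask);
}

/* e.g. grf_field(GENMASK(1, 0), sel) writes bits [1:0] and nothing else. */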
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index 13cd614e12a1..751fecd466e3 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -69,7 +69,6 @@ struct tusb1210 {
struct delayed_work chg_det_work;
struct notifier_block psy_nb;
struct power_supply *psy;
- struct power_supply *charger;
#endif
};
@@ -236,19 +235,24 @@ static const char * const tusb1210_chargers[] = {
static bool tusb1210_get_online(struct tusb1210 *tusb)
{
+ struct power_supply *charger = NULL;
union power_supply_propval val;
- int i;
+ bool online = false;
+ int i, ret;
- for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
- tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
+ for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !charger; i++)
+ charger = power_supply_get_by_name(tusb1210_chargers[i]);
- if (!tusb->charger)
+ if (!charger)
return false;
- if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
- return false;
+ ret = power_supply_get_property(charger, POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret == 0)
+ online = val.intval;
+
+ power_supply_put(charger);
- return val.intval;
+ return online;
}
static void tusb1210_chg_det_work(struct work_struct *work)
@@ -473,9 +477,6 @@ static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
cancel_delayed_work_sync(&tusb->chg_det_work);
power_supply_unregister(tusb->psy);
}
-
- if (tusb->charger)
- power_supply_put(tusb->charger);
}
#else
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
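Note: the rework above stops caching the power_supply pointer in the device struct; each query takes a temporary reference and drops it with power_supply_put() on every exit path, so no reference can leak across queries. The pattern in isolation (supply_is_online() is a hypothetical helper, sketched against the same API):

#include <linux/power_supply.h>

/* Query a named supply's online property; put the reference in all paths. */
static bool supply_is_online(const char *name)
{
	union power_supply_propval val;
	struct power_supply *psy;
	bool online = false;

	psy = power_supply_get_by_name(name);
	if (!psy)
		return false;

	if (!power_supply_get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val))
		online = val.intval;

	power_supply_put(psy);
	return online;
}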
diff --git a/drivers/pinctrl/aspeed/Makefile b/drivers/pinctrl/aspeed/Makefile
index 489ea1778353..db2a7600ae2b 100644
--- a/drivers/pinctrl/aspeed/Makefile
+++ b/drivers/pinctrl/aspeed/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
# Aspeed pinctrl support
-ccflags-y += $(call cc-option,-Woverride-init)
+ccflags-y += -Woverride-init
obj-$(CONFIG_PINCTRL_ASPEED) += pinctrl-aspeed.o pinmux-aspeed.o
obj-$(CONFIG_PINCTRL_ASPEED_G4) += pinctrl-aspeed-g4.o
obj-$(CONFIG_PINCTRL_ASPEED_G5) += pinctrl-aspeed-g5.o
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 49f89b70dcec..7f66ec73199a 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
}
ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
- IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, gpio_dev);
+ IRQF_SHARED | IRQF_COND_ONESHOT, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
index 8ea867c2a01a..62bc24f6dcc7 100644
--- a/drivers/platform/chrome/cros_ec_uart.c
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -263,12 +263,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
if (!ec_dev)
return -ENOMEM;
- ret = devm_serdev_device_open(dev, serdev);
- if (ret) {
- dev_err(dev, "Unable to open UART device");
- return ret;
- }
-
serdev_device_set_drvdata(serdev, ec_dev);
init_waitqueue_head(&ec_uart->response.wait_queue);
@@ -280,14 +274,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
return ret;
}
- ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
- if (ret < 0) {
- dev_err(dev, "Failed to set up host baud rate (%d)", ret);
- return ret;
- }
-
- serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
-
/* Initialize ec_dev for cros_ec */
ec_dev->phys_name = dev_name(dev);
ec_dev->dev = dev;
@@ -301,6 +287,20 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret) {
+ dev_err(dev, "Unable to open UART device");
+ return ret;
+ }
+
+ ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set up host baud rate (%d)", ret);
+ return ret;
+ }
+
+ serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
+
return cros_ec_register(ec_dev);
}
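Note: the reorder above matters because serdev can deliver receive callbacks as soon as the port is opened, so the client ops (and everything they touch) must be registered before devm_serdev_device_open(). A condensed sketch of the safe ordering (example_* names are hypothetical; receive_buf signature per recent serdev, around v6.8+):

#include <linux/serdev.h>

static size_t example_receive_buf(struct serdev_device *serdev,
				  const u8 *data, size_t count)
{
	return count;	/* consume everything in this sketch */
}

static const struct serdev_device_ops example_client_ops = {
	.receive_buf = example_receive_buf,
	.write_wakeup = serdev_device_write_wakeup,
};

static int example_serdev_probe(struct serdev_device *serdev)
{
	int ret;

	/* 1. Ops first: callbacks may fire the moment the port opens. */
	serdev_device_set_client_ops(serdev, &example_client_ops);

	/* 2. Only now open and configure the port. */
	ret = devm_serdev_device_open(&serdev->dev, serdev);
	if (ret)
		return ret;

	ret = serdev_device_set_baudrate(serdev, 115200);
	if (ret < 0)
		return ret;

	serdev_device_set_flow_control(serdev, false);
	return 0;
}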
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ee2e164f86b9..38c932df6446 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -598,6 +598,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_predator_v4,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PH18-71",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH18-71"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index b456370166b6..b4f49720c87f 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
}
},
+ {
+ .ident = "Framework Laptop 13 (Phoenix)",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+ DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 6b26e48ce8ad..7d6079b02589 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
auto-mode.o cnqf.o \
- tee-if.o spc.o
+ tee-if.o spc.o pmf-quirks.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index d0cf46e2fc8e..1157ec148880 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -343,7 +343,10 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
if (err)
return err;
- pdev->supported_func = output.supported_functions;
+ /* only set if not already set by a quirk */
+ if (!pdev->supported_func)
+ pdev->supported_func = output.supported_functions;
+
dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n",
output.supported_functions, output.notification_mask, output.version);
@@ -437,7 +440,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev)
status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, apmf_walk_resources, pmf_dev);
if (ACPI_FAILURE(status)) {
- dev_err(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
+ dev_dbg(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
return -EINVAL;
}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 5d4f80698a8b..64e6e34a2a9a 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -445,6 +445,7 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
+ amd_pmf_quirks_init(dev);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_dbgfs_register(dev);
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
new file mode 100644
index 000000000000..0b2eb0ae85fe
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/pmf-quirks.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver Quirks
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/dmi.h>
+
+#include "pmf.h"
+
+struct quirk_entry {
+ u32 supported_func;
+};
+
+static struct quirk_entry quirk_no_sps_bug = {
+ .supported_func = 0x4003,
+};
+
+static const struct dmi_system_id fwbug_list[] = {
+ {
+ .ident = "ROG Zephyrus G14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA403UV"),
+ },
+ .driver_data = &quirk_no_sps_bug,
+ },
+ {}
+};
+
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
+{
+ const struct dmi_system_id *dmi_id;
+ struct quirk_entry *quirks;
+
+ dmi_id = dmi_first_match(fwbug_list);
+ if (!dmi_id)
+ return;
+
+ quirks = dmi_id->driver_data;
+ if (quirks->supported_func) {
+ dev->supported_func = quirks->supported_func;
+ pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
+ dmi_id->ident);
+ }
+}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 8c4df5753f40..eeedd0c0395a 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -720,4 +720,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+/* Quirk infrastructure */
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev);
+
#endif /* PMF_H */
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 7457ca2b27a6..c7a827645864 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -49,6 +49,8 @@ static const struct acpi_device_id intel_hid_ids[] = {
{"INTC1076", 0},
{"INTC1077", 0},
{"INTC1078", 0},
+ {"INTC107B", 0},
+ {"INTC10CB", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
@@ -504,6 +506,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
struct platform_device *device = context;
struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
unsigned long long ev_index;
+ struct key_entry *ke;
int err;
/*
@@ -545,11 +548,15 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
if (event == 0xc0 || !priv->array)
return;
- if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
+ ke = sparse_keymap_entry_from_scancode(priv->array, event);
+ if (!ke) {
dev_info(&device->dev, "unknown event 0x%x\n", event);
return;
}
+ if (ke->type == KE_IGNORE)
+ return;
+
wakeup:
pm_wakeup_hard_event(&device->dev);
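Note: fetching the key_entry up front lets the handler filter KE_IGNORE scancodes before the wakeup path runs, instead of merely checking that some mapping exists. A standalone sketch of the lookup-then-filter pattern (report_event() is hypothetical):

#include <linux/input/sparse-keymap.h>

/* Report a scancode only if it maps to a real, non-ignored entry. */
static void report_event(struct input_dev *input, unsigned int code)
{
	const struct key_entry *ke;

	ke = sparse_keymap_entry_from_scancode(input, code);
	if (!ke || ke->type == KE_IGNORE)
		return;

	sparse_keymap_report_entry(input, ke, 1, true);
}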
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 08df9494603c..30951f7131cd 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -719,6 +719,7 @@ static struct miscdevice isst_if_char_driver = {
};
static const struct x86_cpu_id hpm_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
{}
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index bd75d61ff8a6..ef730200a04b 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -29,7 +29,7 @@
#include "uncore-frequency-common.h"
#define UNCORE_MAJOR_VERSION 0
-#define UNCORE_MINOR_VERSION 1
+#define UNCORE_MINOR_VERSION 2
#define UNCORE_HEADER_INDEX 0
#define UNCORE_FABRIC_CLUSTER_OFFSET 8
@@ -329,7 +329,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
goto remove_clusters;
}
- if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MINOR_VERSION)
+ if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
TPMI_MINOR_VERSION(pd_info->ufs_header_ver));
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 084c355c86f5..79bb2c801daa 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -136,8 +136,6 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->switches_dev->id.bustype = BUS_HOST;
if (priv->has_switches) {
- detect_tablet_mode(&device->dev);
-
ret = input_register_device(priv->switches_dev);
if (ret)
return ret;
@@ -258,9 +256,6 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
{
- unsigned long long vgbs;
- acpi_status status;
-
/* See dual_accel_detect.h for more info */
if (dual_accel)
return false;
@@ -268,8 +263,7 @@ static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
if (!dmi_check_system(dmi_switches_allow_list))
return false;
- status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
- return ACPI_SUCCESS(status);
+ return acpi_has_method(handle, "VGBS");
}
static int intel_vbtn_probe(struct platform_device *device)
@@ -316,6 +310,9 @@ static int intel_vbtn_probe(struct platform_device *device)
if (ACPI_FAILURE(status))
dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
}
+ // Check switches after buttons since VBDL may have side effects.
+ if (has_switches)
+ detect_tablet_mode(&device->dev);
device_init_wakeup(&device->dev, true);
/*
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index ad3c39e9e9f5..e714ee6298dd 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -736,7 +736,7 @@ static int acpi_add(struct acpi_device *device)
default:
year = 2019;
}
- pr_info("product: %s year: %d\n", product, year);
+ pr_info("product: %s year: %d\n", product ?: "unknown", year);
if (year >= 2019)
battery_limit_use_wmbb = 1;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 291f14ef6702..77244c9aa60d 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -264,6 +264,7 @@ static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_KEY, 0xb32, { KEY_NEXTSONG } },
{ KE_KEY, 0xb33, { KEY_PLAYPAUSE } },
{ KE_KEY, 0xb5a, { KEY_MEDIA } },
+ { KE_IGNORE, 0x0e00, { KEY_RESERVED } }, /* Wake from sleep */
{ KE_IGNORE, 0x1430, { KEY_RESERVED } }, /* Wake from sleep */
{ KE_IGNORE, 0x1501, { KEY_RESERVED } }, /* Output changed */
{ KE_IGNORE, 0x1502, { KEY_RESERVED } }, /* HDMI plugged/unplugged */
@@ -3523,9 +3524,10 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
(dev->kbd_mode == SCI_KBD_MODE_ON) ?
LED_FULL : LED_OFF);
break;
+ case 0x8e: /* Power button pressed */
+ break;
case 0x85: /* Unknown */
case 0x8d: /* Unknown */
- case 0x8e: /* Unknown */
case 0x94: /* Unknown */
case 0x95: /* Unknown */
default:
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index d70f793ce4b3..403525cc1783 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -443,7 +443,7 @@ of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
if (IS_ERR(pwm))
return pwm;
- if (args->args_count > 1)
+ if (args->args_count > 0)
pwm->args.period = args->args[0];
pwm->args.polarity = PWM_POLARITY_NORMAL;
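Note: the fix above is an off-by-one in cell counting. With a single-PWM chip, the period is the first specifier cell, so it must be consumed whenever args_count > 0; the old "> 1" test skipped the only argument. A sketch of the corrected xlate shape (the pwm_request_from_chip() step is omitted; names are illustrative):

#include <linux/pwm.h>
#include <linux/of.h>

static struct pwm_device *
example_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
{
	struct pwm_device *pwm = &chip->pwms[0];	/* request step omitted */

	if (args->args_count > 0)		/* cell 0: period in ns */
		pwm->args.period = args->args[0];

	pwm->args.polarity = PWM_POLARITY_NORMAL;
	if (args->args_count > 1 && args->args[1] & PWM_POLARITY_INVERTED)
		pwm->args.polarity = PWM_POLARITY_INVERTED;

	return pwm;
}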
diff --git a/drivers/pwm/pwm-dwc-core.c b/drivers/pwm/pwm-dwc-core.c
index 043736972cb9..c8425493b95d 100644
--- a/drivers/pwm/pwm-dwc-core.c
+++ b/drivers/pwm/pwm-dwc-core.c
@@ -172,7 +172,6 @@ struct pwm_chip *dwc_pwm_alloc(struct device *dev)
dwc->clk_ns = 10;
chip->ops = &dwc_pwm_ops;
- dev_set_drvdata(dev, chip);
return chip;
}
EXPORT_SYMBOL_GPL(dwc_pwm_alloc);
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index 676eaf8d7a53..fb3eadf6fbc4 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -31,26 +31,34 @@ static const struct dwc_pwm_info ehl_pwm_info = {
.size = 0x1000,
};
-static int dwc_pwm_init_one(struct device *dev, void __iomem *base, unsigned int offset)
+static int dwc_pwm_init_one(struct device *dev, struct dwc_pwm_drvdata *ddata, unsigned int idx)
{
struct pwm_chip *chip;
struct dwc_pwm *dwc;
+ int ret;
chip = dwc_pwm_alloc(dev);
if (IS_ERR(chip))
return PTR_ERR(chip);
dwc = to_dwc_pwm(chip);
- dwc->base = base + offset;
+ dwc->base = ddata->io_base + (ddata->info->size * idx);
- return devm_pwmchip_add(dev, chip);
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret)
+ return ret;
+
+ ddata->chips[idx] = chip;
+ return 0;
}
static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
const struct dwc_pwm_info *info;
struct device *dev = &pci->dev;
- int i, ret;
+ struct dwc_pwm_drvdata *ddata;
+ unsigned int idx;
+ int ret;
ret = pcim_enable_device(pci);
if (ret)
@@ -63,17 +71,25 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
return dev_err_probe(dev, ret, "Failed to iomap PCI BAR\n");
info = (const struct dwc_pwm_info *)id->driver_data;
-
- for (i = 0; i < info->nr; i++) {
- /*
- * No need to check for pcim_iomap_table() failure,
- * pcim_iomap_regions() already does it for us.
- */
- ret = dwc_pwm_init_one(dev, pcim_iomap_table(pci)[0], i * info->size);
+ ddata = devm_kzalloc(dev, struct_size(ddata, chips, info->nr), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ /*
+ * No need to check for pcim_iomap_table() failure,
+ * pcim_iomap_regions() already does it for us.
+ */
+ ddata->io_base = pcim_iomap_table(pci)[0];
+ ddata->info = info;
+
+ for (idx = 0; idx < ddata->info->nr; idx++) {
+ ret = dwc_pwm_init_one(dev, ddata, idx);
if (ret)
return ret;
}
+ dev_set_drvdata(dev, ddata);
+
pm_runtime_put(dev);
pm_runtime_allow(dev);
@@ -88,19 +104,24 @@ static void dwc_pwm_remove(struct pci_dev *pci)
static int dwc_pwm_suspend(struct device *dev)
{
- struct pwm_chip *chip = dev_get_drvdata(dev);
- struct dwc_pwm *dwc = to_dwc_pwm(chip);
- int i;
-
- for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
- if (chip->pwms[i].state.enabled) {
- dev_err(dev, "PWM %u in use by consumer (%s)\n",
- i, chip->pwms[i].label);
- return -EBUSY;
+ struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
+ unsigned int idx;
+
+ for (idx = 0; idx < ddata->info->nr; idx++) {
+ struct pwm_chip *chip = ddata->chips[idx];
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ unsigned int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ if (chip->pwms[i].state.enabled) {
+ dev_err(dev, "PWM %u in use by consumer (%s)\n",
+ i, chip->pwms[i].label);
+ return -EBUSY;
+ }
+ dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
+ dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
+ dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
}
- dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
- dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
- dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
}
return 0;
@@ -108,14 +129,19 @@ static int dwc_pwm_suspend(struct device *dev)
static int dwc_pwm_resume(struct device *dev)
{
- struct pwm_chip *chip = dev_get_drvdata(dev);
- struct dwc_pwm *dwc = to_dwc_pwm(chip);
- int i;
-
- for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
- dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
- dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
- dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+ struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
+ unsigned int idx;
+
+ for (idx = 0; idx < ddata->info->nr; idx++) {
+ struct pwm_chip *chip = ddata->chips[idx];
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ unsigned int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+ }
}
return 0;
diff --git a/drivers/pwm/pwm-dwc.h b/drivers/pwm/pwm-dwc.h
index a8b074841ae8..c6e2df5a6122 100644
--- a/drivers/pwm/pwm-dwc.h
+++ b/drivers/pwm/pwm-dwc.h
@@ -38,6 +38,12 @@ struct dwc_pwm_info {
unsigned int size;
};
+struct dwc_pwm_drvdata {
+ const struct dwc_pwm_info *info;
+ void __iomem *io_base;
+ struct pwm_chip *chips[];
+};
+
struct dwc_pwm_ctx {
u32 cnt;
u32 cnt2;
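Note: with one pwm_chip per PWM instance instead of one per device, the driver data grows a flexible array of chip pointers, sized with struct_size() so the element-count multiplication is overflow-checked. The allocation pattern in isolation (example_* names are hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_drvdata {
	unsigned int nr;
	void *chips[];	/* one pointer per chip instance */
};

/* Allocate drvdata plus @nr trailing pointers in one overflow-safe call. */
static struct example_drvdata *example_alloc_drvdata(unsigned int nr)
{
	struct example_drvdata *ddata;

	ddata = kzalloc(struct_size(ddata, chips, nr), GFP_KERNEL);
	if (ddata)
		ddata->nr = nr;
	return ddata;
}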
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index d79a96679a26..d6596583ed4e 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -284,9 +284,9 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(imgchip->sys_clk);
}
- imgchip->pwm_clk = devm_clk_get(&pdev->dev, "imgchip");
+ imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
if (IS_ERR(imgchip->pwm_clk)) {
- dev_err(&pdev->dev, "failed to get imgchip clock\n");
+ dev_err(&pdev->dev, "failed to get pwm clock\n");
return PTR_ERR(imgchip->pwm_clk);
}
diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c
index 2f4ac9591c8f..271dfad05d68 100644
--- a/drivers/ras/amd/fmpm.c
+++ b/drivers/ras/amd/fmpm.c
@@ -150,6 +150,8 @@ static unsigned int max_nr_fru;
/* Total length of record including headers and list of descriptor entries. */
static size_t max_rec_len;
+#define FMPM_MAX_REC_LEN (sizeof(struct fru_rec) + (sizeof(struct cper_fru_poison_desc) * 255))
+
/* Total number of SPA entries across all FRUs. */
static unsigned int spa_nr_entries;
@@ -475,6 +477,16 @@ static void set_rec_fields(struct fru_rec *rec)
struct cper_section_descriptor *sec_desc = &rec->sec_desc;
struct cper_record_header *hdr = &rec->hdr;
+ /*
+	 * This is a saved record created with a smaller max_nr_entries.
+ * Update the record lengths and keep everything else as-is.
+ */
+ if (hdr->record_length && hdr->record_length < max_rec_len) {
+ pr_debug("Growing record 0x%016llx from %u to %zu bytes\n",
+ hdr->record_id, hdr->record_length, max_rec_len);
+ goto update_lengths;
+ }
+
memcpy(hdr->signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
hdr->revision = CPER_RECORD_REV;
hdr->signature_end = CPER_SIG_END;
@@ -489,19 +501,21 @@ static void set_rec_fields(struct fru_rec *rec)
hdr->error_severity = CPER_SEV_RECOVERABLE;
hdr->validation_bits = 0;
- hdr->record_length = max_rec_len;
hdr->creator_id = CPER_CREATOR_FMP;
hdr->notification_type = CPER_NOTIFY_MCE;
hdr->record_id = cper_next_record_id();
hdr->flags = CPER_HW_ERROR_FLAGS_PREVERR;
sec_desc->section_offset = sizeof(struct cper_record_header);
- sec_desc->section_length = max_rec_len - sizeof(struct cper_record_header);
sec_desc->revision = CPER_SEC_REV;
sec_desc->validation_bits = 0;
sec_desc->flags = CPER_SEC_PRIMARY;
sec_desc->section_type = CPER_SECTION_TYPE_FMP;
sec_desc->section_severity = CPER_SEV_RECOVERABLE;
+
+update_lengths:
+ hdr->record_length = max_rec_len;
+ sec_desc->section_length = max_rec_len - sizeof(struct cper_record_header);
}
static int save_new_records(void)
@@ -512,16 +526,18 @@ static int save_new_records(void)
int ret = 0;
for_each_fru(i, rec) {
- if (rec->hdr.record_length)
+ /* No need to update saved records that match the current record size. */
+ if (rec->hdr.record_length == max_rec_len)
continue;
+ if (!rec->hdr.record_length)
+ set_bit(i, new_records);
+
set_rec_fields(rec);
ret = update_record_on_storage(rec);
if (ret)
goto out_clear;
-
- set_bit(i, new_records);
}
return ret;
@@ -641,12 +657,7 @@ static int get_saved_records(void)
int ret, pos;
ssize_t len;
- /*
- * Assume saved records match current max size.
- *
- * However, this may not be true depending on module parameters.
- */
- old = kmalloc(max_rec_len, GFP_KERNEL);
+ old = kmalloc(FMPM_MAX_REC_LEN, GFP_KERNEL);
if (!old) {
ret = -ENOMEM;
goto out;
@@ -663,21 +674,31 @@ static int get_saved_records(void)
* Make sure to clear temporary buffer between reads to avoid
* leftover data from records of various sizes.
*/
- memset(old, 0, max_rec_len);
+ memset(old, 0, FMPM_MAX_REC_LEN);
- len = erst_read_record(record_id, &old->hdr, max_rec_len,
+ len = erst_read_record(record_id, &old->hdr, FMPM_MAX_REC_LEN,
sizeof(struct fru_rec), &CPER_CREATOR_FMP);
if (len < 0)
continue;
- if (len > max_rec_len) {
- pr_debug("Found record larger than max_rec_len\n");
+ new = get_valid_record(old);
+ if (!new) {
+ erst_clear(record_id);
continue;
}
- new = get_valid_record(old);
- if (!new)
- erst_clear(record_id);
+ if (len > max_rec_len) {
+ unsigned int saved_nr_entries;
+
+ saved_nr_entries = len - sizeof(struct fru_rec);
+ saved_nr_entries /= sizeof(struct cper_fru_poison_desc);
+
+ pr_warn("Saved record found with %u entries.\n", saved_nr_entries);
+ pr_warn("Please increase max_nr_entries to %u.\n", saved_nr_entries);
+
+ ret = -EINVAL;
+ goto out_end;
+ }
/* Restore the record */
memcpy(new, old, len);
diff --git a/drivers/ras/debugfs.h b/drivers/ras/debugfs.h
index 4749ccdeeba1..5a2f48439258 100644
--- a/drivers/ras/debugfs.h
+++ b/drivers/ras/debugfs.h
@@ -4,6 +4,10 @@
#include <linux/debugfs.h>
+#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *ras_get_debugfs_root(void);
+#else
+static inline struct dentry *ras_get_debugfs_root(void) { return NULL; }
+#endif /* CONFIG_DEBUG_FS */
#endif /* __RAS_DEBUGFS_H__ */
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index a06f5f2d7932..9c2f0dd42613 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -267,10 +267,17 @@ static const struct i2c_device_id tps65132_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tps65132_id);
+static const struct of_device_id __maybe_unused tps65132_of_match[] = {
+ { .compatible = "ti,tps65132" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tps65132_of_match);
+
static struct i2c_driver tps65132_i2c_driver = {
.driver = {
.name = "tps65132",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = of_match_ptr(tps65132_of_match),
},
.probe = tps65132_probe,
.id_table = tps65132_id,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f95d12345d98..920f550bc313 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
- spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else {
+ if (ret) {
+ spin_unlock_irq(cdev->ccwlock);
CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
@@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
- spin_lock_irq(cdev->ccwlock);
+ /* Wait until a final state is reached */
+ while (!dev_fsm_final_state(cdev)) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ spin_lock_irq(cdev->ccwlock);
+ }
/* Check if online processing was successful */
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
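Note: the loop above closes a race by sampling the FSM state only with the lock held; the lock is dropped just for the sleep and the condition is re-checked after every wakeup. Generic shape of that wait-under-lock idiom (example_dev is hypothetical):

#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_dev {
	spinlock_t lock;
	wait_queue_head_t wq;
	bool final;	/* written under @lock; waiters woken via @wq */
};

/* Sleep until @final is set, but only ever test it with @lock held. */
static void example_wait_final(struct example_dev *dev)
{
	spin_lock_irq(&dev->lock);
	while (!dev->final) {
		spin_unlock_irq(&dev->lock);
		wait_event(dev->wq, READ_ONCE(dev->final));
		spin_lock_irq(&dev->lock);
	}
	spin_unlock_irq(&dev->lock);
}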
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 65d8b2cfd626..42791fa0b80e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -504,6 +504,11 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
+ CIO_MSG_EVENT(2, "fakeirb: deliver device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
+ cdev->private->intparm,
+ cdev->private->flags.fake_irb);
create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 40c97f873075..acd6790dba4d 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -208,6 +208,10 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
@@ -551,6 +555,10 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_TM_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3d9f0834c78b..a1cb39f4b7a2 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
lgr_info_log();
}
-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
- int dstat)
+static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+ int dstat, int dcc)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
goto error;
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
+ if (dcc == 1)
+ return -EAGAIN;
if (!(dstat & DEV_STAT_DEV_END))
goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
- return;
+ return 0;
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ return -EIO;
}
/* qdio interrupt handler */
@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- int cstat, dstat;
+ int cstat, dstat, rc, dcc;
if (!intparm || !irq_ptr) {
ccw_device_get_schid(cdev, &schid);
@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
+ dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
+ rc = 0;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- qdio_establish_handle_irq(irq_ptr, cstat, dstat);
+ rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cstat || dstat)
qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
+ else if (dcc == 1)
+ rc = -EAGAIN;
break;
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON_ONCE(1);
}
+
+ if (rc == -EAGAIN) {
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
+ rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
+ if (!rc)
+ return;
+ DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ }
+
wake_up(&cdev->private->wait_q);
}
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 2c8e964425dc..43778b088ffa 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -292,13 +292,16 @@ out:
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
- dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
- dmb->cpu_addr, dmb->dma_addr);
+ dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
+ DMA_FROM_DEVICE);
+ folio_put(virt_to_folio(dmb->cpu_addr));
}
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
+ struct folio *folio;
unsigned long bit;
+ int rc;
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
@@ -315,14 +318,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
- dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
- &dmb->dma_addr,
- GFP_KERNEL | __GFP_NOWARN |
- __GFP_NOMEMALLOC | __GFP_NORETRY);
- if (!dmb->cpu_addr)
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_NORETRY, get_order(dmb->dmb_len));
- return dmb->cpu_addr ? 0 : -ENOMEM;
+ if (!folio) {
+ rc = -ENOMEM;
+ goto out_bit;
+ }
+
+ dmb->cpu_addr = folio_address(folio);
+ dmb->dma_addr = dma_map_page(&ism->pdev->dev,
+ virt_to_page(dmb->cpu_addr), 0,
+ dmb->dmb_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+	folio_put(folio);
+out_bit:
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ return rc;
}
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
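Note: the receive buffer now comes straight from the page allocator and is mapped with the streaming DMA API, which is why teardown is dma_unmap_page() plus folio_put() rather than dma_free_coherent() — and why the error path must use folio_put(), never kfree(). The allocate-and-map half as a standalone sketch (hypothetical helper, device-read direction as in ism):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate @len bytes and map them for device-to-memory DMA (sketch). */
static void *example_alloc_mapped(struct device *dev, size_t len,
				  dma_addr_t *dma)
{
	struct folio *folio;
	void *addr;

	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN, get_order(len));
	if (!folio)
		return NULL;

	addr = folio_address(folio);
	*dma = dma_map_page(dev, virt_to_page(addr), 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		folio_put(folio);	/* page-allocator memory: never kfree() */
		return NULL;
	}
	return addr;
}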
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a0cce6872075..f0b8b709649f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1179,6 +1179,20 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
}
}
+/**
+ * qeth_irq() - qeth interrupt handler
+ * @cdev: ccw device
+ * @intparm: expected to be a pointer to an iob
+ * @irb: Interruption Response Block
+ *
+ * In the good path, the corresponding qeth channel is locked with the
+ * last used iob as active_cmd. But this function is also called for
+ * error interrupts.
+ *
+ * Context: The caller ensures that interrupts are disabled and that
+ * the ccw device lock is held.
+ *
+ */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
@@ -1220,11 +1234,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
}
- qeth_unlock_channel(card, channel);
-
rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
+ qeth_unlock_channel(card, channel);
if (iob)
qeth_cancel_cmd(iob, rc);
return;
@@ -1268,6 +1281,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
+ qeth_unlock_channel(card, channel);
if (iob)
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
@@ -1276,6 +1290,26 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
}
+ if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
+ /* channel command hasn't started: retry.
+ * active_cmd is still set to last iob
+ */
+ QETH_CARD_TEXT(card, 2, "irqcc1");
+ rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
+ (addr_t)iob, 0, 0, iob->timeout);
+ if (rc) {
+ QETH_DBF_MESSAGE(2,
+ "ccw retry on %x failed, rc = %i\n",
+ CARD_DEVID(card), rc);
+ QETH_CARD_TEXT_(card, 2, " err%d", rc);
+ qeth_unlock_channel(card, channel);
+ qeth_cancel_cmd(iob, rc);
+ }
+ return;
+ }
+
+ qeth_unlock_channel(card, channel);
+
if (iob) {
/* sanity check: */
if (irb->scsw.cmd.count > iob->length) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 2c246e80c1c4..d91659811eb3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -833,7 +833,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
- spin_lock_bh(&tgt->cq_lock);
ctx_base_ptr = tgt->ctx_base;
tgt->ctx_base = NULL;
@@ -889,7 +888,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
tgt->sq, tgt->sq_dma);
tgt->sq = NULL;
}
- spin_unlock_bh(&tgt->cq_lock);
if (ctx_base_ptr)
iounmap(ctx_base_ptr);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 1befcd5b2a0f..fa07a6f54003 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -102,7 +102,9 @@ do { \
#define MAX_RETRIES 1
-static struct class * ch_sysfs_class;
+static const struct class ch_sysfs_class = {
+ .name = "scsi_changer",
+};
typedef struct {
struct kref ref;
@@ -930,7 +932,7 @@ static int ch_probe(struct device *dev)
mutex_init(&ch->lock);
kref_init(&ch->ref);
ch->device = sd;
- class_dev = device_create(ch_sysfs_class, dev,
+ class_dev = device_create(&ch_sysfs_class, dev,
MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
"s%s", ch->name);
if (IS_ERR(class_dev)) {
@@ -955,7 +957,7 @@ static int ch_probe(struct device *dev)
return 0;
destroy_dev:
- device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
+ device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
put_device:
scsi_device_put(sd);
remove_idr:
@@ -974,7 +976,7 @@ static int ch_remove(struct device *dev)
dev_set_drvdata(dev, NULL);
spin_unlock(&ch_index_lock);
- device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
+ device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
scsi_device_put(ch->device);
kref_put(&ch->ref, ch_destroy);
return 0;
@@ -1003,11 +1005,9 @@ static int __init init_ch_module(void)
int rc;
printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
- ch_sysfs_class = class_create("scsi_changer");
- if (IS_ERR(ch_sysfs_class)) {
- rc = PTR_ERR(ch_sysfs_class);
+ rc = class_register(&ch_sysfs_class);
+ if (rc)
return rc;
- }
rc = register_chrdev(SCSI_CHANGER_MAJOR,"ch",&changer_fops);
if (rc < 0) {
printk("Unable to get major %d for SCSI-Changer\n",
@@ -1022,7 +1022,7 @@ static int __init init_ch_module(void)
fail2:
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
fail1:
- class_destroy(ch_sysfs_class);
+ class_unregister(&ch_sysfs_class);
return rc;
}
@@ -1030,7 +1030,7 @@ static void __exit exit_ch_module(void)
{
scsi_unregister_driver(&ch_template.gendrv);
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
- class_destroy(ch_sysfs_class);
+ class_unregister(&ch_sysfs_class);
idr_destroy(&ch_index_idr);
}
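Note: the ch driver now defines its class as a static const object and registers it, rather than allocating one at runtime with class_create(); the object's lifetime is the module's, so error handling collapses to the class_register() return code. Minimal shape of the pattern (example_* names are hypothetical):

#include <linux/device/class.h>
#include <linux/module.h>

static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	return class_register(&example_class);
}
module_init(example_init);

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");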
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index debd36974119..e8382cc5cf23 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -28,7 +28,12 @@ MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
-static struct class *cxlflash_class;
+static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
+static const struct class cxlflash_class = {
+ .name = "cxlflash",
+ .devnode = cxlflash_devnode,
+};
+
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
@@ -3602,7 +3607,7 @@ static int init_chrdev(struct cxlflash_cfg *cfg)
goto err1;
}
- char_dev = device_create(cxlflash_class, NULL, devno,
+ char_dev = device_create(&cxlflash_class, NULL, devno,
NULL, "cxlflash%d", minor);
if (IS_ERR(char_dev)) {
rc = PTR_ERR(char_dev);
@@ -3880,14 +3885,12 @@ static int cxlflash_class_init(void)
cxlflash_major = MAJOR(devno);
- cxlflash_class = class_create("cxlflash");
- if (IS_ERR(cxlflash_class)) {
- rc = PTR_ERR(cxlflash_class);
+ rc = class_register(&cxlflash_class);
+ if (rc) {
pr_err("%s: class_create failed rc=%d\n", __func__, rc);
goto err;
}
- cxlflash_class->devnode = cxlflash_devnode;
out:
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
@@ -3903,7 +3906,7 @@ static void cxlflash_class_exit(void)
{
dev_t devno = MKDEV(cxlflash_major, 0);
- class_destroy(cxlflash_class);
+ class_unregister(&cxlflash_class);
unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 097dfe4b620d..35f8e00850d6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1797,7 +1797,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
if (dev_is_sata(device)) {
struct ata_link *link = &device->sata_dev.ap->link;
- rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
+ rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
smp_ata_check_ready_type);
} else {
msleep(2000);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 7d2a33514538..34f96cc35342 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2244,7 +2244,15 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
(sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
- ts->stat = SAS_PROTO_RESPONSE;
+ if (task->ata_task.use_ncq) {
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+
+ sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR;
+ slot->abort = 1;
+ } else {
+ ts->stat = SAS_PROTO_RESPONSE;
+ }
} else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 4f495a41ec4a..2d92549e5243 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -353,12 +353,13 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->shost_state == SHOST_CREATED) {
/*
- * Free the shost_dev device name here if scsi_host_alloc()
- * and scsi_host_put() have been called but neither
+ * Free the shost_dev device name and remove the proc host dir
+ * here if scsi_host_{alloc,put}() have been called but neither
* scsi_host_add() nor scsi_remove_host() has been called.
* This avoids that the memory allocated for the shost_dev
- * name is leaked.
+ * name as well as the proc dir structure are leaked.
*/
+ scsi_proc_hostdir_rm(shost->hostt);
kfree(dev_name(&shost->shost_dev));
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index a2204674b680..f6e6db8b8aba 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -135,7 +135,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
static inline void *alloc_smp_req(int size)
{
- u8 *p = kzalloc(size, GFP_KERNEL);
+ u8 *p = kzalloc(ALIGN(size, ARCH_DMA_MINALIGN), GFP_KERNEL);
if (p)
p[0] = SMP_REQUEST;
return p;
@@ -1621,6 +1621,16 @@ out_err:
/* ---------- Domain revalidation ---------- */
+static void sas_get_sas_addr_and_dev_type(struct smp_disc_resp *disc_resp,
+ u8 *sas_addr,
+ enum sas_device_type *type)
+{
+ memcpy(sas_addr, disc_resp->disc.attached_sas_addr, SAS_ADDR_SIZE);
+ *type = to_dev_type(&disc_resp->disc);
+ if (*type == SAS_PHY_UNUSED)
+ memset(sas_addr, 0, SAS_ADDR_SIZE);
+}
+
static int sas_get_phy_discover(struct domain_device *dev,
int phy_id, struct smp_disc_resp *disc_resp)
{
@@ -1674,13 +1684,8 @@ int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
return -ENOMEM;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
- if (res == 0) {
- memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
- SAS_ADDR_SIZE);
- *type = to_dev_type(&disc_resp->disc);
- if (*type == 0)
- memset(sas_addr, 0, SAS_ADDR_SIZE);
- }
+ if (res == 0)
+ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, type);
kfree(disc_resp);
return res;
}
@@ -1940,6 +1945,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
enum sas_device_type type = SAS_PHY_UNUSED;
+ struct smp_disc_resp *disc_resp;
u8 sas_addr[SAS_ADDR_SIZE];
char msg[80] = "";
int res;
@@ -1951,33 +1957,41 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
SAS_ADDR(dev->sas_addr), phy_id, msg);
memset(sas_addr, 0, SAS_ADDR_SIZE);
- res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
+ disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
+ if (!disc_resp)
+ return -ENOMEM;
+
+ res = sas_get_phy_discover(dev, phy_id, disc_resp);
switch (res) {
case SMP_RESP_NO_PHY:
phy->phy_state = PHY_NOT_PRESENT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
- return res;
+ goto out_free_resp;
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
- return res;
+ goto out_free_resp;
case SMP_RESP_FUNC_ACC:
break;
case -ECOMM:
break;
default:
- return res;
+ goto out_free_resp;
}
+ if (res == 0)
+ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, &type);
+
if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
phy->phy_state = PHY_EMPTY;
sas_unregister_devs_sas_addr(dev, phy_id, last);
/*
- * Even though the PHY is empty, for convenience we discover
- * the PHY to update the PHY info, like negotiated linkrate.
+ * Even though the PHY is empty, for convenience we update
+ * the PHY info, like negotiated linkrate.
*/
- sas_ex_phy_discover(dev, phy_id);
- return res;
+ if (res == 0)
+ sas_set_ex_phy(dev, phy_id, disc_resp);
+ goto out_free_resp;
} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
dev_type_flutter(type, phy->attached_dev_type)) {
struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
@@ -1989,7 +2003,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
action = ", needs recovery";
pr_debug("ex %016llx phy%02d broadcast flutter%s\n",
SAS_ADDR(dev->sas_addr), phy_id, action);
- return res;
+ goto out_free_resp;
}
/* we always have to delete the old device when we went here */
@@ -1998,7 +2012,10 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
SAS_ADDR(phy->attached_sas_addr));
sas_unregister_devs_sas_addr(dev, phy_id, last);
- return sas_discover_new(dev, phy_id);
+ res = sas_discover_new(dev, phy_id);
+out_free_resp:
+ kfree(disc_resp);
+ return res;
}
/**
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 30d20d37554f..98ca7df003ef 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1333,7 +1333,6 @@ struct lpfc_hba {
struct timer_list fabric_block_timer;
unsigned long bit_flags;
atomic_t num_rsrc_err;
- atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
unsigned long last_ramp_down_time;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1438,6 +1437,7 @@ struct lpfc_hba {
struct timer_list inactive_vmid_poll;
/* RAS Support */
+ spinlock_t ras_fwlog_lock; /* do not take while holding another lock */
struct lpfc_ras_fwlog ras_fwlog;
uint32_t iocb_cnt;
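Note: ras_fwlog_lock exists so the short fwlog-state checks that follow in this series no longer ride on the heavily contended hbalock (and, per the comment, it must not nest inside another lock). The dedicated-lock pattern in miniature (example_hba is hypothetical):

#include <linux/spinlock.h>

struct example_hba {
	spinlock_t hbalock;		/* broad, hot adapter lock */
	spinlock_t ras_fwlog_lock;	/* guards only the fwlog state */
	int fwlog_state;
};

/* Sample the fwlog state without touching the contended hbalock. */
static int example_fwlog_state(struct example_hba *phba)
{
	int state;

	spin_lock_irq(&phba->ras_fwlog_lock);
	state = phba->fwlog_state;
	spin_unlock_irq(&phba->ras_fwlog_lock);

	return state;
}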
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 365c7e96070b..3c534b3cfe91 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5865,9 +5865,9 @@ lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
return -EINVAL;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
state = phba->ras_fwlog.state;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
if (state == REG_INPROGRESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index d80e6e81053b..529df1768fa8 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2513,7 +2513,7 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
return -ENOMEM;
}
- dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ dmabuff = mbox->ctx_buf;
mbox->ctx_buf = NULL;
mbox->ctx_ndlp = NULL;
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
@@ -3169,10 +3169,10 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
}
cmdwqe = &cmdiocbq->wqe;
- memset(cmdwqe, 0, sizeof(union lpfc_wqe));
+ memset(cmdwqe, 0, sizeof(*cmdwqe));
if (phba->sli_rev < LPFC_SLI_REV4) {
rspwqe = &rspiocbq->wqe;
- memset(rspwqe, 0, sizeof(union lpfc_wqe));
+ memset(rspwqe, 0, sizeof(*rspwqe));
}
INIT_LIST_HEAD(&head);
@@ -3376,7 +3376,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
unsigned long flags;
uint8_t *pmb, *pmb_buf;
- dd_data = pmboxq->ctx_ndlp;
+ dd_data = pmboxq->ctx_u.dd_data;
/*
* The outgoing buffer is readily referred from the dma buffer,
@@ -3553,7 +3553,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint8_t *pmbx;
- dd_data = pmboxq->ctx_buf;
+ dd_data = pmboxq->ctx_u.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -3940,7 +3940,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->ctx_buf = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4112,7 +4112,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->ctx_buf = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4460,7 +4460,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->ctx_buf = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4747,7 +4747,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
from = pmbx;
ext = from + sizeof(MAILBOX_t);
- pmboxq->ctx_buf = ext;
+ pmboxq->ext_buf = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
pmboxq->out_ext_byte_len =
@@ -4875,7 +4875,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
- pmboxq->ctx_ndlp = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -5070,12 +5070,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Current logging state */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (ras_fwlog->state == ACTIVE)
ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
else
ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
@@ -5132,13 +5132,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
if (action == LPFC_RASACTION_STOP_LOGGING) {
/* Check if already disabled */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (ras_fwlog->state != ACTIVE) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
rc = -ESRCH;
goto ras_job_error;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
/* Disable logging */
lpfc_ras_stop_fwlog(phba);
@@ -5149,10 +5149,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
* FW-logging with new log-level. Return status
* "Logging already Running" to caller.
**/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (ras_fwlog->state != INACTIVE)
action_status = -EINPROGRESS;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
/* Enable logging */
rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
@@ -5268,13 +5268,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
goto ras_job_error;
/* Logging to be stopped before reading */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (ras_fwlog->state == ACTIVE) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
rc = -EINPROGRESS;
goto ras_job_error;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
if (job->request_len <
sizeof(struct fc_bsg_request) +
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index ab5af10c8a16..a2d2b02b3418 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2194,12 +2194,12 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
memset(buffer, 0, size);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (phba->ras_fwlog.state != ACTIVE) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
return -EINVAL;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
list_for_each_entry_safe(dmabuf, next,
&phba->ras_fwlog.fwlog_buff_list, list) {
@@ -2250,13 +2250,13 @@ lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file)
int size;
int rc = -ENOMEM;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (phba->ras_fwlog.state != ACTIVE) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
rc = -EINVAL;
goto out;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
if (check_mul_overflow(LPFC_RAS_MIN_BUFF_POST_SIZE,
phba->cfg_ras_fwlog_buffsize, &size))
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 28e56542e072..f7c28dc73bf6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4437,23 +4437,23 @@ lpfc_els_retry_delay(struct timer_list *t)
unsigned long flags;
struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
+ /* Hold a node reference for outstanding queued work */
+ if (!lpfc_nlp_get(ndlp))
+ return;
+
spin_lock_irqsave(&phba->hbalock, flags);
if (!list_empty(&evtp->evt_listp)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_nlp_put(ndlp);
return;
}
- /* We need to hold the node by incrementing the reference
- * count until the queued work is done
- */
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- if (evtp->evt_arg1) {
- evtp->evt = LPFC_EVT_ELS_RETRY;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- lpfc_worker_wake_up(phba);
- }
+ evtp->evt_arg1 = ndlp;
+ evtp->evt = LPFC_EVT_ELS_RETRY;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
- return;
+
+ lpfc_worker_wake_up(phba);
}
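The rewritten lpfc_els_retry_delay() above inverts the old ordering: it pins the node with lpfc_nlp_get() before taking hbalock, drops the extra reference on the already-queued path, and issues the worker wakeup only after the unlock. A self-contained sketch of that take-reference-first queueing shape (C11 atomics model the kref; every name is illustrative):

/* Sketch: pin an object before publishing work that uses it. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct node {
	atomic_int refs;
	bool queued;
};

struct hba {
	pthread_mutex_t work_lock;
	pthread_cond_t work_cv;
};

/* Take a reference only while the object is still live (refs > 0). */
static bool node_get(struct node *n)
{
	int r = atomic_load(&n->refs);
	while (r > 0)
		if (atomic_compare_exchange_weak(&n->refs, &r, r + 1))
			return true;
	return false;
}

static void node_put(struct node *n)
{
	atomic_fetch_sub(&n->refs, 1);	/* freeing elided in this sketch */
}

static void queue_retry_work(struct hba *hba, struct node *n)
{
	if (!node_get(n))		/* pin before queueing */
		return;

	pthread_mutex_lock(&hba->work_lock);
	if (n->queued) {		/* already queued: undo the ref */
		pthread_mutex_unlock(&hba->work_lock);
		node_put(n);
		return;
	}
	n->queued = true;
	pthread_mutex_unlock(&hba->work_lock);

	pthread_cond_signal(&hba->work_cv);	/* wake outside the lock */
}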
/**
@@ -7238,7 +7238,7 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
goto rdp_fail;
mbox->vport = rdp_context->ndlp->vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
+ mbox->ctx_u.rdp = rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
@@ -7290,7 +7290,7 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
mbox->mbox_offset_word = 5;
- mbox->ctx_buf = virt;
+ mbox->ext_buf = virt;
} else {
bf_set(lpfc_mbx_memory_dump_type3_length,
&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
@@ -7298,7 +7298,6 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
}
mbox->vport = phba->pport;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
if (rc == MBX_NOT_FINISHED) {
@@ -7307,7 +7306,7 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
}
if (phba->sli_rev == LPFC_SLI_REV4)
- mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
+ mp = mbox->ctx_buf;
else
mp = mpsave;
@@ -7350,7 +7349,7 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
mbox->mbox_offset_word = 5;
- mbox->ctx_buf = virt;
+ mbox->ext_buf = virt;
} else {
bf_set(lpfc_mbx_memory_dump_type3_length,
&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
@@ -7358,7 +7357,6 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
}
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
rc = 1;
@@ -7500,9 +7498,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
int rc;
mb = &pmb->u.mb;
- lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
+ lcb_context = pmb->ctx_u.lcb;
ndlp = lcb_context->ndlp;
- pmb->ctx_ndlp = NULL;
+ memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u));
pmb->ctx_buf = NULL;
shdr = (union lpfc_sli4_cfg_shdr *)
@@ -7642,7 +7640,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
LPFC_SLI4_MBX_EMBED);
- mbox->ctx_ndlp = (void *)lcb_context;
+ mbox->ctx_u.lcb = lcb_context;
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_els_lcb_rsp;
bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
@@ -8639,9 +8637,9 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb = &pmb->u.mb;
ndlp = pmb->ctx_ndlp;
- rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
- oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
- pmb->ctx_buf = NULL;
+ rxid = (uint16_t)(pmb->ctx_u.ox_rx_id & 0xffff);
+ oxid = (uint16_t)((pmb->ctx_u.ox_rx_id >> 16) & 0xffff);
+ memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u));
pmb->ctx_ndlp = NULL;
if (mb->mbxStatus) {
@@ -8745,8 +8743,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
- mbox->ctx_buf = (void *)((unsigned long)
- (ox_id << 16 | ctx));
+ mbox->ctx_u.ox_rx_id = ox_id << 16 | ctx;
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp)
goto node_err;
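lpfc_els_rcv_rls()/lpfc_els_rsp_rls_acc() above stop laundering the two 16-bit exchange IDs through the untyped ctx_buf pointer and keep them in the new ctx_u.ox_rx_id word instead; the packing arithmetic itself is unchanged. A tiny runnable check of that pack/unpack:

/* Sketch of the OX_ID/RX_ID packing: two 16-bit IDs in one word. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t ox_id = 0x1234, rx_id = 0xabcd;

	unsigned long packed = (unsigned long)ox_id << 16 | rx_id;

	uint16_t rx = (uint16_t)(packed & 0xffff);
	uint16_t ox = (uint16_t)((packed >> 16) & 0xffff);

	assert(ox == ox_id && rx == rx_id);
	return 0;
}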
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a7a2309a629f..e42fa9c822b5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -257,7 +257,9 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (evtp->evt_arg1) {
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_worker_wake_up(phba);
+ return;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
} else {
@@ -275,10 +277,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
-
}
-
- return;
}
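The lpfc_dev_loss_tmo_callbk hunk above (and the matching lpfc_set_rrq_active change in lpfc_sli.c further down) reorders the wakeup to happen after hbalock is released, so the woken worker cannot immediately block on a lock its waker still holds. The shape, sketched with pthreads standing in for the kernel primitives:

/* Sketch: signal a worker only after dropping the lock it will need. */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int work_pending;

static void post_work(void)
{
	pthread_mutex_lock(&lock);
	work_pending = 1;
	pthread_mutex_unlock(&lock);

	/* Waking after the unlock avoids bouncing the worker straight
	 * back to sleep on a lock the waker still holds. */
	pthread_cond_signal(&cv);
}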
/**
@@ -3429,7 +3428,7 @@ static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct serv_parm *sp = &vport->fc_sparam;
@@ -3737,7 +3736,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_mbx_read_top *la;
struct lpfc_sli_ring *pring;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
uint8_t attn_type;
/* Unblock ELS traffic */
@@ -3851,8 +3850,8 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
/* The driver calls the state machine with the pmb pointer
* but wants to make sure a stale ctx_buf isn't acted on.
@@ -4066,7 +4065,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
* the dump routine is a single-use construct.
*/
if (pmb->ctx_buf) {
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->ctx_buf = NULL;
@@ -4089,7 +4088,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if (phba->sli_rev == LPFC_SLI_REV4) {
byte_count = pmb->u.mqe.un.mb_words[5];
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
if (byte_count > sizeof(struct static_vport_info) -
offset)
byte_count = sizeof(struct static_vport_info)
@@ -4169,7 +4168,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
@@ -4307,7 +4306,7 @@ void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
int rc;
@@ -4431,7 +4430,7 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
if (mb->mbxStatus) {
@@ -5174,7 +5173,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_nodelist *ndlp;
- ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
+ ndlp = pmb->ctx_ndlp;
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
@@ -5496,7 +5495,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
mb->ctx_ndlp = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
@@ -5507,7 +5506,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
- (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
+ (ndlp != mb->ctx_ndlp))
continue;
mb->ctx_ndlp = NULL;
@@ -5517,7 +5516,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
list_del(&mb->list);
lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
@@ -6357,7 +6356,7 @@ void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
pmb->ctx_ndlp = NULL;
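The long run of lpfc_hbadisc.c hunks above is pure cast removal: once ctx_buf and ctx_ndlp gain real types in the lpfc_sli.h hunk below, the (struct lpfc_dmabuf *) and (struct lpfc_nodelist *) casts become redundant, and even before the type change they were unnecessary, since C converts void * to any object pointer implicitly:

/* Sketch: a void pointer converts to any object pointer type in C
 * without a cast, so removing the casts above changes nothing. */
struct dmabuf { int dummy; };

static struct dmabuf *ctx_to_dmabuf(void *ctx)
{
	struct dmabuf *mp = ctx;	/* implicit conversion, no cast */
	return mp;
}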
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 88b2e57d90c2..f7a0aa3625f4 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return -EIO;
}
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
* longer needed. Prevent unintended ctx_buf access as the mbox is
@@ -2217,7 +2217,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
psli->slistat.link_event++;
- lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
+ lpfc_read_topology(phba, pmb, pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = vport;
/* Block ELS IOCBs until we have processed this mbox command */
@@ -5454,7 +5454,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
+ lpfc_read_topology(phba, pmb, pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -6347,7 +6347,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
+ lpfc_read_topology(phba, pmb, pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -7705,6 +7705,9 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
"NVME" : " "),
(phba->nvmet_support ? "NVMET" : " "));
+ /* ras_fwlog state */
+ spin_lock_init(&phba->ras_fwlog_lock);
+
/* Initialize the IO buffer list used by driver for SLI3 SCSI */
spin_lock_init(&phba->scsi_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
@@ -13055,7 +13058,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
rc = request_threaded_irq(eqhdl->irq,
&lpfc_sli4_hba_intr_handler,
&lpfc_sli4_hba_intr_handler_th,
- IRQF_ONESHOT, name, eqhdl);
+ 0, name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f7c41958036b..e98f1c2b2220 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -102,7 +102,7 @@ lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
{
struct lpfc_dmabuf *mp;
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ mp = mbox->ctx_buf;
mbox->ctx_buf = NULL;
/* Release the generic BPL buffer memory. */
@@ -204,10 +204,8 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
uint16_t region_id)
{
MAILBOX_t *mb;
- void *ctx;
mb = &pmb->u.mb;
- ctx = pmb->ctx_buf;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -219,7 +217,6 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
- pmb->ctx_buf = ctx;
mb->mbxOwner = OWN_HOST;
return;
}
@@ -236,11 +233,8 @@ void
lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
- void *ctx;
mb = &pmb->u.mb;
- /* Save context so that we can restore after memset */
- ctx = pmb->ctx_buf;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
@@ -254,7 +248,6 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
- pmb->ctx_buf = ctx;
return;
}
@@ -372,7 +365,7 @@ lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
/* Save address for later completion and set the owner to host so that
* the FW knows this mailbox is available for processing.
*/
- pmb->ctx_buf = (uint8_t *)mp;
+ pmb->ctx_buf = mp;
mb->mbxOwner = OWN_HOST;
return (0);
}
@@ -1816,7 +1809,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
}
/* Reinitialize the context pointers to avoid stale usage. */
mbox->ctx_buf = NULL;
- mbox->context3 = NULL;
+ memset(&mbox->ctx_u, 0, sizeof(mbox->ctx_u));
kfree(mbox->sge_array);
/* Finally, free the mailbox command itself */
mempool_free(mbox, phba->mbox_mem_pool);
@@ -2366,8 +2359,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
MAILBOX_t *mb;
int rc = FAILURE;
- struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mboxq->ctx_ndlp);
+ struct lpfc_rdp_context *rdp_context = mboxq->ctx_u.rdp;
mb = &mboxq->u.mb;
if (mb->mbxStatus)
@@ -2385,9 +2377,8 @@ mbx_failed:
static void
lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
+ struct lpfc_dmabuf *mp = mbox->ctx_buf;
+ struct lpfc_rdp_context *rdp_context = mbox->ctx_u.rdp;
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
goto error_mbox_free;
@@ -2401,7 +2392,7 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
/* Save the dma buffer for cleanup in the final completion. */
mbox->ctx_buf = mp;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
+ mbox->ctx_u.rdp = rdp_context;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
goto error_mbox_free;
@@ -2416,9 +2407,8 @@ void
lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
int rc;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
- struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
+ struct lpfc_dmabuf *mp = mbox->ctx_buf;
+ struct lpfc_rdp_context *rdp_context = mbox->ctx_u.rdp;
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
goto error;
@@ -2448,7 +2438,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
+ mbox->ctx_u.rdp = rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
goto error;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 8e425be7c7c9..c4172791c267 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -300,7 +300,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
int rc;
ndlp = login_mbox->ctx_ndlp;
- save_iocb = login_mbox->context3;
+ save_iocb = login_mbox->ctx_u.save_iocb;
if (mb->mbxStatus == MBX_SUCCESS) {
/* Now that REG_RPI completed successfully,
@@ -640,7 +640,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!login_mbox->ctx_ndlp)
goto out;
- login_mbox->context3 = save_iocb; /* For PLOGI ACC */
+ login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
@@ -682,8 +682,8 @@ lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_nodelist *ndlp;
uint32_t cmd;
- elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
- ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
+ elsiocb = mboxq->ctx_u.save_iocb;
+ ndlp = mboxq->ctx_ndlp;
vport = mboxq->vport;
cmd = elsiocb->drvrTimeout;
@@ -1875,7 +1875,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
mb->ctx_ndlp = NULL;
@@ -1886,7 +1886,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
list_del(&mb->list);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 09c53b85bcb8..c5792eaf3f64 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2616,9 +2616,9 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
/* Don't let the host nvme transport keep sending keep-alives
* on this remoteport. Vport is unloading, no recovery. The
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 8258b771bd00..561ced5503c6 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1586,7 +1586,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
wqe = &nvmewqe->wqe;
/* Initialize WQE */
- memset(wqe, 0, sizeof(union lpfc_wqe));
+ memset(wqe, 0, sizeof(*wqe));
ctx_buf->iocbq->cmd_dmabuf = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c0038eaae7b0..4a6e5223a224 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -167,11 +167,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
struct Scsi_Host *shost;
struct scsi_device *sdev;
unsigned long new_queue_depth;
- unsigned long num_rsrc_err, num_cmd_success;
+ unsigned long num_rsrc_err;
int i;
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
- num_cmd_success = atomic_read(&phba->num_cmd_success);
/*
* The error and success command counters are global per
@@ -186,20 +185,16 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
- new_queue_depth =
- sdev->queue_depth * num_rsrc_err /
- (num_rsrc_err + num_cmd_success);
- if (!new_queue_depth)
- new_queue_depth = sdev->queue_depth - 1;
+ if (num_rsrc_err >= sdev->queue_depth)
+ new_queue_depth = 1;
else
new_queue_depth = sdev->queue_depth -
- new_queue_depth;
+ num_rsrc_err;
scsi_change_queue_depth(sdev, new_queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
atomic_set(&phba->num_rsrc_err, 0);
- atomic_set(&phba->num_cmd_success, 0);
}
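The lpfc_scsi.c ramp-down rewrite above replaces the proportional depth * err / (err + success) formula, whose result could degenerate when the counters were skewed, with a plain subtraction clamped at a depth of one; the num_cmd_success counter becomes unused and is dropped. A runnable restatement of the new arithmetic:

/* Sketch of the simplified ramp-down: drop the queue depth by the
 * number of resource errors seen, never below one. */
#include <assert.h>

static unsigned long ramp_down(unsigned long depth, unsigned long errs)
{
	if (errs >= depth)
		return 1;
	return depth - errs;
}

int main(void)
{
	assert(ramp_down(32, 4) == 28);
	assert(ramp_down(8, 64) == 1);	/* clamped at a depth of 1 */
	return 0;
}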
/**
@@ -5336,16 +5331,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
} else {
- if (vport->phba->cfg_enable_bg) {
- lpfc_printf_vlog(vport,
- KERN_INFO, LOG_SCSI_CMD,
- "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
- "x%x reftag x%x cnt %u pt %x\n",
- cmnd->cmnd[0],
- scsi_prot_ref_tag(cmnd),
- scsi_logical_block_count(cmnd),
- (cmnd->cmnd[1]>>5));
- }
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 1f8a9b5945cb..a028e008dd1e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1217,9 +1217,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
empty = list_empty(&phba->active_rrq_list);
list_add_tail(&rrq->list, &phba->active_rrq_list);
phba->hba_flag |= HBA_RRQ_ACTIVE;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
if (empty)
lpfc_worker_wake_up(phba);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
return 0;
out:
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -2830,7 +2830,7 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
*/
pmboxq->mbox_flag |= LPFC_MBX_WAKE;
spin_lock_irqsave(&phba->hbalock, drvr_flag);
- pmbox_done = (struct completion *)pmboxq->context3;
+ pmbox_done = pmboxq->ctx_u.mbox_wait;
if (pmbox_done)
complete(pmbox_done);
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -2885,7 +2885,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
if (mp) {
pmb->ctx_buf = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -2914,12 +2914,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
}
if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
/* Check to see if there are any deferred events to process */
if (ndlp) {
@@ -2952,7 +2952,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* This nlp_put pairs with lpfc_sli4_resume_rpi */
if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
}
@@ -5819,7 +5819,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
goto out_free_mboxq;
}
- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
+ mp = mboxq->ctx_buf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
@@ -6849,9 +6849,9 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = INACTIVE;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
/* Disable FW logging to host memory */
writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
@@ -6894,9 +6894,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
ras_fwlog->lwpd.virt = NULL;
}
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = INACTIVE;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
}
/**
@@ -6998,9 +6998,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto disable_ras;
}
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = ACTIVE;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
mempool_free(pmb, phba->mbox_mem_pool);
return;
@@ -7032,9 +7032,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
int rc = 0;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = INACTIVE;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
phba->cfg_ras_fwlog_buffsize);
@@ -7095,9 +7095,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = REG_INPROGRESS;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
@@ -8766,7 +8766,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
mboxq->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
+ mp = mboxq->ctx_buf;
if (rc == MBX_SUCCESS) {
memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
rc = 0;
@@ -9548,8 +9548,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
/* Copy the mailbox extension data */
- if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
- lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
+ if (pmbox->in_ext_byte_len && pmbox->ext_buf) {
+ lpfc_sli_pcimem_bcopy(pmbox->ext_buf,
(uint8_t *)phba->mbox_ext,
pmbox->in_ext_byte_len);
}
@@ -9562,10 +9562,10 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
- if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
+ if (pmbox->in_ext_byte_len && pmbox->ext_buf)
lpfc_memcpy_to_slim(phba->MBslimaddr +
MAILBOX_HBA_EXT_OFFSET,
- pmbox->ctx_buf, pmbox->in_ext_byte_len);
+ pmbox->ext_buf, pmbox->in_ext_byte_len);
if (mbx->mbxCommand == MBX_CONFIG_PORT)
/* copy command data into host mbox for cmpl */
@@ -9688,9 +9688,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
- if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
+ if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
- pmbox->ctx_buf,
+ pmbox->ext_buf,
pmbox->out_ext_byte_len);
}
} else {
@@ -9698,9 +9698,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
- if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
+ if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
lpfc_memcpy_from_slim(
- pmbox->ctx_buf,
+ pmbox->ext_buf,
phba->MBslimaddr +
MAILBOX_HBA_EXT_OFFSET,
pmbox->out_ext_byte_len);
@@ -11373,18 +11373,18 @@ lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
unsigned long iflags;
struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
+ /* Hold a node reference for outstanding queued work */
+ if (!lpfc_nlp_get(ndlp))
+ return;
+
spin_lock_irqsave(&phba->hbalock, iflags);
if (!list_empty(&evtp->evt_listp)) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_nlp_put(ndlp);
return;
}
- /* Incrementing the reference count until the queued work is done. */
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- if (!evtp->evt_arg1) {
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return;
- }
+ evtp->evt_arg1 = ndlp;
evtp->evt = LPFC_EVT_RECOVER_PORT;
list_add_tail(&evtp->evt_listp, &phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -13262,9 +13262,9 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
- /* setup context3 field to pass wait_queue pointer to wake function */
+ /* setup ctx_u field to pass wait_queue pointer to wake function */
init_completion(&mbox_done);
- pmboxq->context3 = &mbox_done;
+ pmboxq->ctx_u.mbox_wait = &mbox_done;
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
@@ -13272,7 +13272,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
msecs_to_jiffies(timeout * 1000));
spin_lock_irqsave(&phba->hbalock, flag);
- pmboxq->context3 = NULL;
+ pmboxq->ctx_u.mbox_wait = NULL;
/*
* if LPFC_MBX_WAKE flag is set the mailbox is completed
* else do not free the resources.
@@ -13813,10 +13813,10 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
if (pmb->out_ext_byte_len &&
- pmb->ctx_buf)
+ pmb->ext_buf)
lpfc_sli_pcimem_bcopy(
phba->mbox_ext,
- pmb->ctx_buf,
+ pmb->ext_buf,
pmb->out_ext_byte_len);
}
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
@@ -13830,10 +13830,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
pmbox->un.varWords[0], 0);
if (!pmbox->mbxStatus) {
- mp = (struct lpfc_dmabuf *)
- (pmb->ctx_buf);
- ndlp = (struct lpfc_nodelist *)
- pmb->ctx_ndlp;
+ mp = pmb->ctx_buf;
+ ndlp = pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was
* successful. new lets get
@@ -14340,8 +14338,8 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
mcqe_status,
pmbox->un.varWords[0], 0);
if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
- mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ mp = pmb->ctx_buf;
+ ndlp = pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was successful. Mark the
* node as having an UNREG_LOGIN in progress to stop
@@ -19823,14 +19821,15 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
* lpfc_sli4_resume_rpi - Remove the rpi bitmask region
* @ndlp: pointer to lpfc nodelist data structure.
* @cmpl: completion call-back.
- * @arg: data to load as MBox 'caller buffer information'
+ * @iocbq: data to load as mbox ctx_u information
*
* This routine is invoked to remove the memory region that
* provided rpi via a bitmask.
**/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
- void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
+ struct lpfc_iocbq *iocbq)
{
LPFC_MBOXQ_t *mboxq;
struct lpfc_hba *phba = ndlp->phba;
@@ -19859,7 +19858,7 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
- mboxq->ctx_buf = arg;
+ mboxq->ctx_u.save_iocb = iocbq;
} else
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mboxq->ctx_ndlp = ndlp;
@@ -20676,7 +20675,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
goto out;
mqe = &mboxq->u.mqe;
- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
+ mp = mboxq->ctx_buf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc)
goto out;
@@ -21035,7 +21034,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
(mb->u.mb.mbxCommand == MBX_REG_VPI))
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ act_mbx_ndlp = mb->ctx_ndlp;
/* This reference is local to this routine. The
* reference is removed at routine exit.
@@ -21064,7 +21063,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ ndlp = mb->ctx_ndlp;
/* Unregister the RPI when mailbox complete */
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
restart_loop = 1;
@@ -21084,7 +21083,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
while (!list_empty(&mbox_cmd_list)) {
list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ ndlp = mb->ctx_ndlp;
mb->ctx_ndlp = NULL;
if (ndlp) {
spin_lock(&ndlp->lock);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index c911a39cb46b..cf7c42ec0306 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -182,11 +182,29 @@ typedef struct lpfcMboxq {
struct lpfc_mqe mqe;
} u;
struct lpfc_vport *vport; /* virtual port pointer */
- void *ctx_ndlp; /* an lpfc_nodelist pointer */
- void *ctx_buf; /* an lpfc_dmabuf pointer */
- void *context3; /* a generic pointer. Code must
- * accommodate the actual datatype.
- */
+ struct lpfc_nodelist *ctx_ndlp; /* caller ndlp pointer */
+ struct lpfc_dmabuf *ctx_buf; /* caller buffer information */
+ void *ext_buf; /* extended buffer for extended mbox
+ * cmds. Not a generic pointer.
+ * Use for storing virtual address.
+ */
+
+ /* Pointers that are seldom used during mbox execution, but require
+ * a saved context.
+ */
+ union {
+ unsigned long ox_rx_id; /* Used in els_rsp_rls_acc */
+ struct lpfc_rdp_context *rdp; /* Used in get_rdp_info */
+ struct lpfc_lcb_context *lcb; /* Used in set_beacon */
+ struct completion *mbox_wait; /* Used in issue_mbox_wait */
+ struct bsg_job_data *dd_data; /* Used in bsg_issue_mbox_cmpl
+ * and
+ * bsg_issue_mbox_ext_handle_job
+ */
+ struct lpfc_iocbq *save_iocb; /* Used in defer_plogi_acc and
+ * lpfc_mbx_cmpl_resume_rpi
+ */
+ } ctx_u;
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
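The lpfc_sli.h hunk above is the centerpiece of this series: the three untyped context pointers collapse into two typed pointers, an ext_buf for mailbox extension data, and a ctx_u union whose members document exactly which path stores what. A stand-in sketch showing the shape, and that the union stays word-sized (types here are placeholders, not the lpfc ones):

/* Sketch of a context union: one slot, several typed interpretations. */
#include <stdio.h>

struct rdp_context;
struct completion;

struct mbox {
	union {
		unsigned long ox_rx_id;		/* packed exchange IDs */
		struct rdp_context *rdp;	/* RDP state machine */
		struct completion *mbox_wait;	/* synchronous waiters */
	} ctx_u;
};

int main(void)
{
	printf("ctx_u is %zu bytes\n", sizeof(((struct mbox *)0)->ctx_u));
	return 0;
}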
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 2541a8fba093..c1e9ec0243ba 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1118,8 +1118,9 @@ void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
-int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
- void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
+ struct lpfc_iocbq *iocbq);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
struct lpfc_io_buf *lpfc_ncmd);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 56f5889dbaf9..915f2f11fb55 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.0"
+#define LPFC_DRIVER_VERSION "14.4.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 0f79840b9498..4439167a5188 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -166,7 +166,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
}
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
@@ -674,10 +674,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
lpfc_free_sysfs_attr(vport);
lpfc_debugfs_terminate(vport);
- /* Remove FC host to break driver binding. */
- fc_remove_host(shost);
- scsi_remove_host(shost);
-
/* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
@@ -721,6 +717,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
skip_logo:
+ /* Remove FC host to break driver binding. */
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
+
lpfc_cleanup(vport);
/* Remove scsi host now. The nodes are cleaned up. */
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 0380996b5ad2..55d590b91947 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -1644,7 +1644,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
if ((mpirep_offset != 0xFF) &&
drv_bufs[mpirep_offset].bsg_buf_len) {
drv_buf_iter = &drv_bufs[mpirep_offset];
- drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
+ drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) +
mrioc->reply_sz);
bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index ca2e932dd9b7..f684eb5e0489 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1775,9 +1775,9 @@ static ssize_t raid_state_show(struct device *dev,
name = myrb_devstate_name(ldev_info->state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->state);
} else {
struct myrb_pdev_state *pdev_info = sdev->hostdata;
@@ -1796,9 +1796,9 @@ static ssize_t raid_state_show(struct device *dev,
else
name = myrb_devstate_name(pdev_info->state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
pdev_info->state);
}
return ret;
@@ -1886,11 +1886,11 @@ static ssize_t raid_level_show(struct device *dev,
name = myrb_raidlevel_name(ldev_info->raid_level);
if (!name)
- return snprintf(buf, 32, "Invalid (%02X)\n",
+ return snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->state);
- return snprintf(buf, 32, "%s\n", name);
+ return snprintf(buf, 64, "%s\n", name);
}
- return snprintf(buf, 32, "Physical Drive\n");
+ return snprintf(buf, 64, "Physical Drive\n");
}
static DEVICE_ATTR_RO(raid_level);
@@ -1903,15 +1903,15 @@ static ssize_t rebuild_show(struct device *dev,
unsigned char status;
if (sdev->channel < myrb_logical_channel(sdev->host))
- return snprintf(buf, 32, "physical device - not rebuilding\n");
+ return snprintf(buf, 64, "physical device - not rebuilding\n");
status = myrb_get_rbld_progress(cb, &rbld_buf);
if (rbld_buf.ldev_num != sdev->id ||
status != MYRB_STATUS_SUCCESS)
- return snprintf(buf, 32, "not rebuilding\n");
+ return snprintf(buf, 64, "not rebuilding\n");
- return snprintf(buf, 32, "rebuilding block %u of %u\n",
+ return snprintf(buf, 64, "rebuilding block %u of %u\n",
rbld_buf.ldev_size - rbld_buf.blocks_left,
rbld_buf.ldev_size);
}
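The myrb.c (and following myrs.c) changes bump every snprintf() bound in these sysfs show() callbacks from 32 to 64 because the longest string, "physical device - not rebuilding\n", needs 34 bytes including its terminator and was being truncated. Since the buffer a show() callback receives is a full page, a hedged modern alternative is sysfs_emit(), which carries the PAGE_SIZE bound itself (kernel-side sketch, names illustrative):

/* Kernel-side sketch: sysfs_emit() enforces the PAGE_SIZE bound, so
 * no hand-written length can go stale the way the literal 32 did. */
#include <linux/sysfs.h>
#include <linux/types.h>

static ssize_t rebuild_show_sketch(char *buf, bool rebuilding)
{
	if (!rebuilding)
		return sysfs_emit(buf, "physical device - not rebuilding\n");
	return sysfs_emit(buf, "rebuilding\n");
}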
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index a1eec65a9713..e824be9d9bbb 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -947,9 +947,9 @@ static ssize_t raid_state_show(struct device *dev,
name = myrs_devstate_name(ldev_info->dev_state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->dev_state);
} else {
struct myrs_pdev_info *pdev_info;
@@ -958,9 +958,9 @@ static ssize_t raid_state_show(struct device *dev,
pdev_info = sdev->hostdata;
name = myrs_devstate_name(pdev_info->dev_state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
pdev_info->dev_state);
}
return ret;
@@ -1066,13 +1066,13 @@ static ssize_t raid_level_show(struct device *dev,
ldev_info = sdev->hostdata;
name = myrs_raid_level_name(ldev_info->raid_level);
if (!name)
- return snprintf(buf, 32, "Invalid (%02X)\n",
+ return snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->dev_state);
} else
name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
- return snprintf(buf, 32, "%s\n", name);
+ return snprintf(buf, 64, "%s\n", name);
}
static DEVICE_ATTR_RO(raid_level);
@@ -1086,7 +1086,7 @@ static ssize_t rebuild_show(struct device *dev,
unsigned char status;
if (sdev->channel < cs->ctlr_info->physchan_present)
- return snprintf(buf, 32, "physical device - not rebuilding\n");
+ return snprintf(buf, 64, "physical device - not rebuilding\n");
ldev_info = sdev->hostdata;
ldev_num = ldev_info->ldev_num;
@@ -1098,11 +1098,11 @@ static ssize_t rebuild_show(struct device *dev,
return -EIO;
}
if (ldev_info->rbld_active) {
- return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
+ return snprintf(buf, 64, "rebuilding block %zu of %zu\n",
(size_t)ldev_info->rbld_lba,
(size_t)ldev_info->cfg_devsize);
} else
- return snprintf(buf, 32, "not rebuilding\n");
+ return snprintf(buf, 64, "not rebuilding\n");
}
static ssize_t rebuild_store(struct device *dev,
@@ -1190,7 +1190,7 @@ static ssize_t consistency_check_show(struct device *dev,
unsigned short ldev_num;
if (sdev->channel < cs->ctlr_info->physchan_present)
- return snprintf(buf, 32, "physical device - not checking\n");
+ return snprintf(buf, 64, "physical device - not checking\n");
ldev_info = sdev->hostdata;
if (!ldev_info)
@@ -1198,11 +1198,11 @@ static ssize_t consistency_check_show(struct device *dev,
ldev_num = ldev_info->ldev_num;
myrs_get_ldev_info(cs, ldev_num, ldev_info);
if (ldev_info->cc_active)
- return snprintf(buf, 32, "checking block %zu of %zu\n",
+ return snprintf(buf, 64, "checking block %zu of %zu\n",
(size_t)ldev_info->cc_lba,
(size_t)ldev_info->cfg_devsize);
else
- return snprintf(buf, 32, "not checking\n");
+ return snprintf(buf, 64, "not checking\n");
}
static ssize_t consistency_check_store(struct device *dev,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e8bcc3a88732..0614b7e366b7 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -61,7 +61,9 @@ static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
* pmcraid_minor - minor number(s) to use
*/
static unsigned int pmcraid_major;
-static struct class *pmcraid_class;
+static const struct class pmcraid_class = {
+ .name = PMCRAID_DEVFILE,
+};
static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
/*
@@ -4723,7 +4725,7 @@ static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
if (error)
pmcraid_release_minor(minor);
else
- device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
+ device_create(&pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
NULL, "%s%u", PMCRAID_DEVFILE, minor);
return error;
}
@@ -4739,7 +4741,7 @@ static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
{
pmcraid_release_minor(MINOR(pinstance->cdev.dev));
- device_destroy(pmcraid_class,
+ device_destroy(&pmcraid_class,
MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
cdev_del(&pinstance->cdev);
}
@@ -5390,10 +5392,10 @@ static int __init pmcraid_init(void)
}
pmcraid_major = MAJOR(dev);
- pmcraid_class = class_create(PMCRAID_DEVFILE);
- if (IS_ERR(pmcraid_class)) {
- error = PTR_ERR(pmcraid_class);
+ error = class_register(&pmcraid_class);
+
+ if (error) {
pmcraid_err("failed to register with sysfs, error = %x\n",
error);
goto out_unreg_chrdev;
@@ -5402,7 +5404,7 @@ static int __init pmcraid_init(void)
error = pmcraid_netlink_init();
if (error) {
- class_destroy(pmcraid_class);
+ class_unregister(&pmcraid_class);
goto out_unreg_chrdev;
}
@@ -5413,7 +5415,7 @@ static int __init pmcraid_init(void)
pmcraid_err("failed to register pmcraid driver, error = %x\n",
error);
- class_destroy(pmcraid_class);
+ class_unregister(&pmcraid_class);
pmcraid_netlink_release();
out_unreg_chrdev:
@@ -5432,7 +5434,7 @@ static void __exit pmcraid_exit(void)
unregister_chrdev_region(MKDEV(pmcraid_major, 0),
PMCRAID_MAX_ADAPTERS);
pci_unregister_driver(&pmcraid_driver);
- class_destroy(pmcraid_class);
+ class_unregister(&pmcraid_class);
}
module_init(pmcraid_init);
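The pmcraid conversion above swaps the runtime-allocated class_create()/class_destroy() pair for a statically defined const struct class registered with class_register(), the pattern the driver core has preferred since the class API was constified. A minimal kernel-side sketch (module and class names illustrative):

/* Kernel-side sketch of the static-class registration pattern. */
#include <linux/device/class.h>
#include <linux/module.h>

static const struct class my_class = {
	.name = "mydev",
};

static int __init my_init(void)
{
	return class_register(&my_class);	/* was: class_create() */
}

static void __exit my_exit(void)
{
	class_unregister(&my_class);		/* was: class_destroy() */
}

module_init(my_init);
module_exit(my_exit);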
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44449c70a375..76eeba435fd0 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2741,7 +2741,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
return;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ /* Will wait for wind down of adapter */
+ ql_dbg(ql_dbg_aer, fcport->vha, 0x900c,
+ "%s pci offline detected (id %06x)\n", __func__,
+ fcport->d_id.b24);
+ qla_pci_set_eeh_busy(fcport->vha);
+ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
+ 0, WAIT_TARGET);
return;
}
}
@@ -2763,7 +2769,11 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
vha = fcport->vha;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ /* Will wait for wind down of adapter */
+ ql_dbg(ql_dbg_aer, fcport->vha, 0x900b,
+ "%s pci offline detected (id %06x)\n", __func__,
+ fcport->d_id.b24);
+ qla_pci_set_eeh_busy(vha);
qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
0, WAIT_TARGET);
return;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index deb642607deb..2f49baf131e2 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -82,7 +82,7 @@ typedef union {
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
-#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."
+#define QLA2XXX_MANUFACTURER "Marvell"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 26e6b3e3af43..dcde55c8ee5d 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1100,7 +1100,7 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
- if (pcnt > app_req.num_ports)
+ if (pcnt >= app_req.num_ports)
break;
app_reply->elem[pcnt].rekey_count =
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 09cb9413670a..7309310d2ab9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -44,7 +44,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
-extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
+extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *);
extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
struct els_plogi *els_plogi);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a314cfc5b263..8377624d76c9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1193,8 +1193,12 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ /*
+	 * Use qla24xx_async_gnl_sp_done to purge all pending gnl requests.
+	 * kref_put is called behind the scenes.
+ */
+ sp->u.iocb_cmd.u.mbx.in_mb[0] = MBS_COMMAND_ERROR;
+ qla24xx_async_gnl_sp_done(sp, QLA_COMMAND_ERROR);
fcport->flags &= ~(FCF_ASYNC_SENT);
done:
fcport->flags &= ~(FCF_ASYNC_ACTIVE);
@@ -2665,6 +2669,40 @@ exit:
return rval;
}
+static void qla_enable_fce_trace(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->fce) {
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(vha,
+ ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8033,
+ "Unable to reinitialize FCE (%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+ }
+}
+
+static void qla_enable_eft_trace(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8034,
+ "Unable to reinitialize EFT (%d).\n", rval);
+ }
+ }
+}
/*
* qla2x00_initialize_adapter
* Initialize board.
@@ -3668,9 +3706,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
}
static void
-qla2x00_init_fce_trace(scsi_qla_host_t *vha)
+qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
{
- int rval;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
@@ -3699,27 +3736,17 @@ qla2x00_init_fce_trace(scsi_qla_host_t *vha)
return;
}
- rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
- ha->fce_mb, &ha->fce_bufs);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00bf,
- "Unable to initialize FCE (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
- return;
- }
-
ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
- ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
+ ha->fce_bufs = FCE_NUM_BUFFERS;
}
static void
-qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+qla2x00_alloc_eft_trace(scsi_qla_host_t *vha)
{
- int rval;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
@@ -3744,14 +3771,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
return;
}
- rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00c2,
- "Unable to initialize EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
- return;
- }
-
ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
@@ -3759,13 +3778,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
ha->eft = tc;
}
-static void
-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
-{
- qla2x00_init_fce_trace(vha);
- qla2x00_init_eft_trace(vha);
-}
-
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
@@ -3820,10 +3832,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
- qla2x00_init_fce_trace(vha);
+ qla2x00_alloc_fce_trace(vha);
if (ha->fce)
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
- qla2x00_init_eft_trace(vha);
+ qla2x00_alloc_eft_trace(vha);
if (ha->eft)
eft_size = EFT_SIZE;
}
@@ -4253,7 +4265,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags;
- uint16_t fw_major_version;
int done_once = 0;
if (IS_P3P_TYPE(ha)) {
@@ -4320,7 +4331,6 @@ execute_fw_with_lr:
goto failed;
enable_82xx_npiv:
- fw_major_version = ha->fw_major_version;
if (IS_P3P_TYPE(ha))
qla82xx_check_md_needed(vha);
else
@@ -4349,12 +4359,11 @@ enable_82xx_npiv:
if (rval != QLA_SUCCESS)
goto failed;
- if (!fw_major_version && !(IS_P3P_TYPE(ha)))
- qla2x00_alloc_offload_mem(vha);
-
if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
+ qla_enable_fce_trace(vha);
+ qla_enable_eft_trace(vha);
} else {
goto failed;
}
@@ -7487,12 +7496,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
- int rval;
uint8_t status = 0;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp, *tvp;
struct req_que *req = ha->req_q_map[0];
unsigned long flags;
+ fc_port_t *fcport;
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
@@ -7561,6 +7570,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
"ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
return status;
}
+
+ /* User may have updated [fcp|nvme] prefer in flash */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (NVME_PRIORITY(ha, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+ }
+
if (!qla2x00_restart_isp(vha)) {
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
@@ -7581,31 +7599,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (IS_QLA81XX(ha) || IS_QLA8031(ha))
qla2x00_get_fw_version(vha);
- if (ha->fce) {
- ha->flags.fce_enabled = 1;
- memset(ha->fce, 0,
- fce_calc_size(ha->fce_bufs));
- rval = qla2x00_enable_fce_trace(vha,
- ha->fce_dma, ha->fce_bufs, ha->fce_mb,
- &ha->fce_bufs);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x8033,
- "Unable to reinitialize FCE "
- "(%d).\n", rval);
- ha->flags.fce_enabled = 0;
- }
- }
- if (ha->eft) {
- memset(ha->eft, 0, EFT_SIZE);
- rval = qla2x00_enable_eft_trace(vha,
- ha->eft_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x8034,
- "Unable to reinitialize EFT "
- "(%d).\n", rval);
- }
- }
} else { /* failed the ISP abort */
vha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
@@ -7655,6 +7649,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
+ /* User may have updated [fcp|nvme] prefer in flash */
+ list_for_each_entry(fcport, &vp->vp_fcports, list) {
+ if (NVME_PRIORITY(ha, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+ }
+
qla2x00_vp_abort_isp(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index df90169f8244..0b41e8a06602 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2587,6 +2587,33 @@ void
qla2x00_sp_release(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct scsi_qla_host *vha = sp->vha;
+
+ switch (sp->type) {
+ case SRB_CT_PTHRU_CMD:
+ /* GPSC & GFPNID use fcport->ct_desc.ct_sns for both req & rsp */
+ if (sp->u.iocb_cmd.u.ctarg.req &&
+ (!sp->fcport ||
+ sp->u.iocb_cmd.u.ctarg.req != sp->fcport->ct_desc.ct_sns)) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp &&
+ (!sp->fcport ||
+ sp->u.iocb_cmd.u.ctarg.rsp != sp->fcport->ct_desc.ct_sns)) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+ break;
+ default:
+ break;
+ }
sp->free(sp);
}
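The new qla2x00_sp_release() body above frees the CT passthrough DMA buffers at the final kref drop, but only when the srb owns them; buffers that alias the fcport's preallocated ct_desc.ct_sns are left alone. A small userspace sketch of that owned-versus-borrowed release rule (names illustrative):

/* Sketch: at the final reference drop, free a buffer only when the
 * object owns it, not when it borrows a preallocated one. */
#include <stdlib.h>

struct port { void *ct_sns; };		/* preallocated, port-owned */

struct srb {
	struct port *port;
	void *req;			/* maybe owned, maybe borrowed */
};

static void srb_release(struct srb *sp)
{
	if (sp->req && (!sp->port || sp->req != sp->port->ct_sns))
		free(sp->req);		/* owned: release it */
	sp->req = NULL;			/* borrowed buffers stay put */
	free(sp);
}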
@@ -2610,7 +2637,8 @@ static void qla2x00_els_dcmd_sp_free(srb_t *sp)
{
struct srb_iocb *elsio = &sp->u.iocb_cmd;
- kfree(sp->fcport);
+ if (sp->fcport)
+ qla2x00_free_fcport(sp->fcport);
if (elsio->u.els_logo.els_logo_pyld)
dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
@@ -2692,7 +2720,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
*/
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
return -ENOMEM;
@@ -2723,6 +2751,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_logo.els_logo_pyld) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}
@@ -2747,6 +2776,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (rval != QLA_SUCCESS) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}
@@ -3012,7 +3042,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
- fc_port_t *fcport, bool wait)
+ fc_port_t *fcport)
{
srb_t *sp;
struct srb_iocb *elsio = NULL;
@@ -3027,8 +3057,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!sp) {
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
- return -ENOMEM;
+ goto done;
}
fcport->flags |= FCF_ASYNC_SENT;
@@ -3037,9 +3066,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ql_dbg(ql_dbg_io, vha, 0x3073,
"%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
- if (wait)
- sp->flags = SRB_WAKEUP_ON_COMP;
-
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
@@ -3055,7 +3081,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_plogi.els_plogi_pyld) {
rval = QLA_FUNCTION_FAILED;
- goto out;
+ goto done_free_sp;
}
resp_ptr = elsio->u.els_plogi.els_resp_pyld =
@@ -3064,7 +3090,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_plogi.els_resp_pyld) {
rval = QLA_FUNCTION_FAILED;
- goto out;
+ goto done_free_sp;
}
ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
@@ -3080,7 +3106,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
struct fc_els_flogi *p = ptr;
-
p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
}
@@ -3089,10 +3114,11 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
sizeof(*elsio->u.els_plogi.els_plogi_pyld));
- init_completion(&elsio->u.els_plogi.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ goto done_free_sp;
} else {
ql_dbg(ql_dbg_disc, vha, 0x3074,
"%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
@@ -3100,21 +3126,15 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
fcport->d_id.b24, vha->d_id.b24);
}
- if (wait) {
- wait_for_completion(&elsio->u.els_plogi.comp);
-
- if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
- rval = QLA_FUNCTION_FAILED;
- } else {
- goto done;
- }
+ return rval;
-out:
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+done_free_sp:
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
return rval;
}
@@ -3918,7 +3938,7 @@ qla2x00_start_sp(srb_t *sp)
return -EAGAIN;
}
- pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
+ pkt = qla2x00_alloc_iocbs_ready(sp->qpair, sp);
if (!pkt) {
rval = -EAGAIN;
ql_log(ql_log_warn, vha, 0x700c,
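A note on the qla2xxx hunks above: they converge on one goto-based unwind, where each failure path releases exactly what was acquired and falls through to the shared cleanup labels (done_free_sp, then done). A minimal, compilable userspace C sketch of that pattern, with hypothetical names, not driver code:

#include <stdlib.h>

struct ctx { void *a; void *b; };

/* Sketch of the goto-unwind style: each label releases what the
 * steps before it acquired, in reverse order. */
static int ctx_setup(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		goto fail;		/* nothing acquired yet */

	c->b = malloc(64);
	if (!c->b)
		goto free_a;		/* undo step 1 only */

	return 0;

free_a:
	free(c->a);
	c->a = NULL;
fail:
	return -1;
}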
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 21ec32b4fb28..0cd6f3e14882 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -194,7 +194,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
ha->flags.eeh_busy) {
ql_log(ql_log_warn, vha, 0xd035,
- "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
+ "Purge mbox: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
rval = QLA_ABORTED;
goto premature_exit;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index dd674378f2f3..1e2f52210f60 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4602,6 +4602,7 @@ fail_free_init_cb:
ha->init_cb_dma = 0;
fail_free_vp_map:
kfree(ha->vp_map);
+ ha->vp_map = NULL;
fail:
ql_log(ql_log_fatal, NULL, 0x0030,
"Memory allocation failure.\n");
@@ -5583,7 +5584,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
break;
case QLA_EVT_ELS_PLOGI:
qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
- e->u.fcport.fcport, false);
+ e->u.fcport.fcport);
break;
case QLA_EVT_SA_REPLACE:
rc = qla24xx_issue_sa_replace_iocb(vha, e);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2ef2dbac0db2..d7551b1443e4 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1062,6 +1062,16 @@ void qlt_free_session_done(struct work_struct *work)
"%s: sess %p logout completed\n", __func__, sess);
}
+ /* Check for any straggling I/O left behind */
+ if (!(sess->flags & FCF_FCP2_DEVICE) &&
+ qla2x00_eh_wait_for_pending_commands(sess->vha, sess->d_id.b24, 0, WAIT_TARGET)) {
+ ql_log(ql_log_warn, vha, 0x3027,
+ "IO did not return. Resetting.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+ }
+
if (sess->logo_ack_needed) {
sess->logo_ack_needed = 0;
qla24xx_async_notify_ack(vha, sess,
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d903563e969e..7627fd807bc3 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.09.100-k"
+#define QLA2XXX_VERSION "10.02.09.200-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 9
-#define QLA_DRIVER_BETA_VER 100
+#define QLA_DRIVER_BETA_VER 200
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2e28e2360c85..5b3230ef51fe 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -635,10 +635,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_queue_add_random(q))
add_disk_randomness(req->q->disk);
- if (!blk_rq_is_passthrough(req)) {
- WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
- cmd->flags &= ~SCMD_INITIALIZED;
- }
+ WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
+ !(cmd->flags & SCMD_INITIALIZED));
+ cmd->flags = 0;
/*
* Calling rcu_barrier() is not necessary here because the
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 8d06475de17a..ffd7e7e72933 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1642,6 +1642,40 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
}
EXPORT_SYMBOL(scsi_add_device);
+int scsi_resume_device(struct scsi_device *sdev)
+{
+ struct device *dev = &sdev->sdev_gendev;
+ int ret = 0;
+
+ device_lock(dev);
+
+ /*
+ * Bail out if the device or its queue is not running. Otherwise,
+ * the rescan may block waiting for commands to be executed while we
+ * hold the device lock, which can deadlock against the power
+ * management core while a system resume is ongoing.
+ */
+ if (sdev->sdev_state != SDEV_RUNNING ||
+ blk_queue_pm_only(sdev->request_queue)) {
+ ret = -EWOULDBLOCK;
+ goto unlock;
+ }
+
+ if (dev->driver && try_module_get(dev->driver->owner)) {
+ struct scsi_driver *drv = to_scsi_driver(dev->driver);
+
+ if (drv->resume)
+ ret = drv->resume(dev);
+ module_put(dev->driver->owner);
+ }
+
+unlock:
+ device_unlock(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(scsi_resume_device);
+
int scsi_rescan_device(struct scsi_device *sdev)
{
struct device *dev = &sdev->sdev_gendev;
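The comment in scsi_resume_device() states a rule worth keeping in mind generally: never start work that can block while holding a lock the blocked path needs; check the state and refuse instead. A compilable userspace sketch of the same guard, with a pthread mutex standing in for the device lock and hypothetical names:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct mydev { pthread_mutex_t lock; bool running; };

static int mydev_resume(struct mydev *d, int (*resume_cb)(struct mydev *))
{
	int ret = 0;

	pthread_mutex_lock(&d->lock);
	if (!d->running) {
		/* refusing beats blocking with the lock held */
		ret = -EWOULDBLOCK;
		goto unlock;
	}
	if (resume_cb)
		ret = resume_cb(d);
unlock:
	pthread_mutex_unlock(&d->lock);
	return ret;
}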
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ccff8f2e2e75..58fdf679341d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3920,7 +3920,7 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
- put_device(&sdkp->disk_dev);
+ device_unregister(&sdkp->disk_dev);
put_disk(gd);
goto out;
}
@@ -4108,7 +4108,21 @@ static int sd_suspend_runtime(struct device *dev)
return sd_suspend_common(dev, true);
}
-static int sd_resume(struct device *dev, bool runtime)
+static int sd_resume(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+
+ if (opal_unlock_from_suspend(sdkp->opal_dev)) {
+ sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sd_resume_common(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret;
@@ -4124,7 +4138,7 @@ static int sd_resume(struct device *dev, bool runtime)
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
if (!ret) {
- opal_unlock_from_suspend(sdkp->opal_dev);
+ sd_resume(dev);
sdkp->suspended = false;
}
@@ -4143,7 +4157,7 @@ static int sd_resume_system(struct device *dev)
return 0;
}
- return sd_resume(dev, false);
+ return sd_resume_common(dev, false);
}
static int sd_resume_runtime(struct device *dev)
@@ -4170,7 +4184,7 @@ static int sd_resume_runtime(struct device *dev)
"Failed to clear sense data\n");
}
- return sd_resume(dev, true);
+ return sd_resume_common(dev, true);
}
static const struct dev_pm_ops sd_pm_ops = {
@@ -4193,6 +4207,7 @@ static struct scsi_driver sd_template = {
.pm = &sd_pm_ops,
},
.rescan = sd_rescan,
+ .resume = sd_resume,
.init_command = sd_init_command,
.uninit_command = sd_uninit_command,
.done = sd_done,
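The sd.c reshuffle above factors the OPAL unlock into sd_resume() so that both the PM path (sd_resume_common()) and the new scsi_driver ->resume callback share one implementation. A compact sketch of that decomposition, with hypothetical types and helpers rather than the driver's real ones:

#include <errno.h>

struct disk { int locked; int stopped; };

static int opal_unlock(struct disk *d) { d->locked = 0; return 0; }
static int start_disk(struct disk *d)  { d->stopped = 0; return 0; }

/* ~ sd_resume(): only the shared side effect */
static int disk_unlock(struct disk *d)
{
	return opal_unlock(d) ? -EIO : 0;
}

/* ~ sd_resume_common(): the PM path reuses the helper */
static int disk_pm_resume(struct disk *d)
{
	int ret = start_disk(d);

	if (!ret)
		ret = disk_unlock(d);
	return ret;
}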
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 86210e4dd0d3..baf870a03ecf 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -285,6 +285,7 @@ sg_open(struct inode *inode, struct file *filp)
int dev = iminor(inode);
int flags = filp->f_flags;
struct request_queue *q;
+ struct scsi_device *device;
Sg_device *sdp;
Sg_fd *sfp;
int retval;
@@ -301,11 +302,12 @@ sg_open(struct inode *inode, struct file *filp)
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
- retval = scsi_device_get(sdp->device);
+ device = sdp->device;
+ retval = scsi_device_get(device);
if (retval)
goto sg_put;
- retval = scsi_autopm_get_device(sdp->device);
+ retval = scsi_autopm_get_device(device);
if (retval)
goto sdp_put;
@@ -313,7 +315,7 @@ sg_open(struct inode *inode, struct file *filp)
* check if O_NONBLOCK. Permits SCSI commands to be issued
* during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
- scsi_block_when_processing_errors(sdp->device))) {
+ scsi_block_when_processing_errors(device))) {
retval = -ENXIO;
/* we are in error recovery for this device */
goto error_out;
@@ -344,7 +346,7 @@ sg_open(struct inode *inode, struct file *filp)
if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
- q = sdp->device->request_queue;
+ q = device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
sfp = sg_add_sfp(sdp);
@@ -370,10 +372,11 @@ out_undo:
error_mutex_locked:
mutex_unlock(&sdp->open_rel_lock);
error_out:
- scsi_autopm_put_device(sdp->device);
+ scsi_autopm_put_device(device);
sdp_put:
- scsi_device_put(sdp->device);
- goto sg_put;
+ kref_put(&sdp->d_ref, sg_device_destroy);
+ scsi_device_put(device);
+ return retval;
}
/* Release resources associated with a successful sg_open()
@@ -1424,7 +1427,9 @@ static const struct file_operations sg_fops = {
.llseek = no_llseek,
};
-static struct class *sg_sysfs_class;
+static const struct class sg_sysfs_class = {
+ .name = "scsi_generic"
+};
static int sg_sysfs_valid = 0;
@@ -1526,7 +1531,7 @@ sg_add_device(struct device *cl_dev)
if (sg_sysfs_valid) {
struct device *sg_class_member;
- sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
+ sg_class_member = device_create(&sg_sysfs_class, cl_dev->parent,
MKDEV(SCSI_GENERIC_MAJOR,
sdp->index),
sdp, "%s", sdp->name);
@@ -1616,7 +1621,7 @@ sg_remove_device(struct device *cl_dev)
read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
- device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
+ device_destroy(&sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
@@ -1687,11 +1692,9 @@ init_sg(void)
SG_MAX_DEVS, "sg");
if (rc)
return rc;
- sg_sysfs_class = class_create("scsi_generic");
- if ( IS_ERR(sg_sysfs_class) ) {
- rc = PTR_ERR(sg_sysfs_class);
+ rc = class_register(&sg_sysfs_class);
+ if (rc)
goto err_out;
- }
sg_sysfs_valid = 1;
rc = scsi_register_interface(&sg_interface);
if (0 == rc) {
@@ -1700,7 +1703,7 @@ init_sg(void)
#endif /* CONFIG_SCSI_PROC_FS */
return 0;
}
- class_destroy(sg_sysfs_class);
+ class_unregister(&sg_sysfs_class);
register_sg_sysctls();
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
@@ -1715,7 +1718,7 @@ exit_sg(void)
remove_proc_subtree("scsi/sg", NULL);
#endif /* CONFIG_SCSI_PROC_FS */
scsi_unregister_interface(&sg_interface);
- class_destroy(sg_sysfs_class);
+ class_unregister(&sg_sysfs_class);
sg_sysfs_valid = 0;
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS);
@@ -2207,6 +2210,7 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
+ struct scsi_device *device = sdp->device;
Sg_request *srp;
unsigned long iflags;
@@ -2232,8 +2236,8 @@ sg_remove_sfp_usercontext(struct work_struct *work)
"sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
- scsi_device_put(sdp->device);
kref_put(&sdp->d_ref, sg_device_destroy);
+ scsi_device_put(device);
module_put(THIS_MODULE);
}
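Both sg (here) and st (below) move from a class allocated at runtime to a statically defined class registered at init. A minimal sketch of that pattern, assuming the post-6.4 driver-core API in which class_register() takes a const struct class pointer:

#include <linux/device/class.h>
#include <linux/module.h>

static const struct class my_class = {
	.name = "my_class",		/* hypothetical name */
};

static int __init my_init(void)
{
	return class_register(&my_class);	/* no class_create() */
}

static void __exit my_exit(void)
{
	class_unregister(&my_class);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");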
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 338aa8c42968..5a9bcf8e0792 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -87,7 +87,7 @@ static int try_rdio = 1;
static int try_wdio = 1;
static int debug_flag;
-static struct class st_sysfs_class;
+static const struct class st_sysfs_class;
static const struct attribute_group *st_dev_groups[];
static const struct attribute_group *st_drv_groups[];
@@ -4438,7 +4438,7 @@ static void scsi_tape_release(struct kref *kref)
return;
}
-static struct class st_sysfs_class = {
+static const struct class st_sysfs_class = {
.name = "scsi_tape",
.dev_groups = st_dev_groups,
};
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 50c664b65f4d..1b7afb19ccd6 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -72,6 +72,7 @@ config MTK_SOCINFO
tristate "MediaTek SoC Information"
default y
depends on NVMEM_MTK_EFUSE
+ select SOC_BUS
help
The MediaTek SoC Information (mtk-socinfo) driver provides
information about the SoC to the userspace including the
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index c832f5c670bc..9a91298c1253 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -1768,6 +1768,7 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
struct dev_pm_opp *opp;
+ char tz_name_buf[20];
unsigned long freq;
int count, ret;
u32 idx, i;
@@ -1819,10 +1820,12 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
}
if (!IS_ERR_OR_NULL(bdata->tzone_name)) {
- svsb->tzd = thermal_zone_get_zone_by_name(bdata->tzone_name);
+ snprintf(tz_name_buf, ARRAY_SIZE(tz_name_buf),
+ "%s-thermal", bdata->tzone_name);
+ svsb->tzd = thermal_zone_get_zone_by_name(tz_name_buf);
if (IS_ERR(svsb->tzd)) {
dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n",
- bdata->tzone_name);
+ tz_name_buf);
return PTR_ERR(svsb->tzd);
}
}
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 7cd24bd8e224..6bcf8e75273c 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -130,6 +130,19 @@ static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
}
+static void amd_sdw_wake_enable(struct amd_sdw_manager *amd_manager, bool enable)
+{
+ u32 wake_ctrl;
+
+ wake_ctrl = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+ if (enable)
+ wake_ctrl |= AMD_SDW_WAKE_INTR_MASK;
+ else
+ wake_ctrl &= ~AMD_SDW_WAKE_INTR_MASK;
+
+ writel(wake_ctrl, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+}
+
static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
int cmd_offset)
{
@@ -1095,6 +1108,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, false);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
/*
@@ -1121,6 +1135,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
return 0;
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, true);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
ret = amd_sdw_clock_stop(amd_manager);
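amd_sdw_wake_enable() above is a plain read-modify-write of one mask bit. The invariant in standalone form (WAKE_BIT mirrors the driver's AMD_SDW_WAKE_INTR_MASK, BIT(16)):

#include <stdint.h>

#define WAKE_BIT (UINT32_C(1) << 16)

/* Only the wake bit changes; every other mask bit is preserved. */
static uint32_t set_wake(uint32_t reg, int enable)
{
	return enable ? (reg | WAKE_BIT) : (reg & ~WAKE_BIT);
}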
diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
index 418b679e0b1a..707065468e05 100644
--- a/drivers/soundwire/amd_manager.h
+++ b/drivers/soundwire/amd_manager.h
@@ -152,7 +152,7 @@
#define AMD_SDW0_EXT_INTR_MASK 0x200000
#define AMD_SDW1_EXT_INTR_MASK 4
#define AMD_SDW_IRQ_MASK_0TO7 0x77777777
-#define AMD_SDW_IRQ_MASK_8TO11 0x000d7777
+#define AMD_SDW_IRQ_MASK_8TO11 0x000c7777
#define AMD_SDW_IRQ_ERROR_MASK 0xff
#define AMD_SDW_MAX_FREQ_NUM 1
#define AMD_SDW0_MAX_TX_PORTS 3
@@ -190,6 +190,7 @@
#define AMD_SDW_CLK_RESUME_REQ 2
#define AMD_SDW_CLK_RESUME_DONE 3
#define AMD_SDW_WAKE_STAT_MASK BIT(16)
+#define AMD_SDW_WAKE_INTR_MASK BIT(16)
static u32 amd_sdw_freq_tbl[AMD_SDW_MAX_FREQ_NUM] = {
AMD_SDW_DEFAULT_CLK_FREQ,
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 079035db7dd8..92a662d1b55c 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -852,39 +852,39 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(fsl_lpspi->base)) {
ret = PTR_ERR(fsl_lpspi->base);
- goto out_controller_put;
+ return ret;
}
fsl_lpspi->base_phys = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto out_controller_put;
+ return ret;
}
ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
dev_name(&pdev->dev), fsl_lpspi);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
- goto out_controller_put;
+ return ret;
}
fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(fsl_lpspi->clk_per)) {
ret = PTR_ERR(fsl_lpspi->clk_per);
- goto out_controller_put;
+ return ret;
}
fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fsl_lpspi->clk_ipg)) {
ret = PTR_ERR(fsl_lpspi->clk_ipg);
- goto out_controller_put;
+ return ret;
}
/* enable the clock */
ret = fsl_lpspi_init_rpm(fsl_lpspi);
if (ret)
- goto out_controller_put;
+ return ret;
ret = pm_runtime_get_sync(fsl_lpspi->dev);
if (ret < 0) {
@@ -945,8 +945,6 @@ out_pm_get:
pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
pm_runtime_put_sync(fsl_lpspi->dev);
pm_runtime_disable(fsl_lpspi->dev);
-out_controller_put:
- spi_controller_put(controller);
return ret;
}
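These fsl-lpspi hunks drop the out_controller_put unwinding without a replacement, which only makes sense if the controller reference is managed elsewhere. My reading, hedged since the allocation is not shown in these hunks, is that the controller is devm-allocated, so an explicit spi_controller_put() on the error path would drop the reference a second time. The shape that assumption implies:

	/* Sketch, assuming a devm-managed controller (hypothetical) */
	controller = devm_spi_alloc_host(&pdev->dev, sizeof(*priv));
	if (!controller)
		return -ENOMEM;
	/* ... later setup steps ... */
	if (ret)
		return ret;	/* no put: devm owns the reference */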
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index 969965d7bc98..cc18d320370f 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -725,6 +725,8 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
sizeof(struct pci1xxxx_spi_internal),
GFP_KERNEL);
+ if (!spi_bus->spi_int[iter])
+ return -ENOMEM;
spi_sub_ptr = spi_bus->spi_int[iter];
spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller));
if (!spi_sub_ptr->spi_host)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 9fcbe040cb2f..f726d8670428 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -430,7 +430,7 @@ static bool s3c64xx_spi_can_dma(struct spi_controller *host,
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
if (sdd->rx_dma.ch && sdd->tx_dma.ch)
- return xfer->len > sdd->fifo_depth;
+ return xfer->len >= sdd->fifo_depth;
return false;
}
@@ -826,10 +826,9 @@ static int s3c64xx_spi_transfer_one(struct spi_controller *host,
return status;
}
- if (!is_polling(sdd) && (xfer->len > fifo_len) &&
+ if (!is_polling(sdd) && xfer->len >= fifo_len &&
sdd->rx_dma.ch && sdd->tx_dma.ch) {
use_dma = 1;
-
} else if (xfer->len >= fifo_len) {
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
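The boundary case behind the two `>=` changes above, as I read it (hedged): with a 64-byte FIFO, a 64-byte transfer previously made s3c64xx_spi_can_dma() answer false (len > fifo_depth), while the transfer path already treated len >= fifo_len as too large for FIFO-only operation, so the two predicates disagreed at exactly len == fifo_depth. Using `>=` in both places keeps the core's DMA mapping decision consistent with the path the driver actually takes.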
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 258aa0e37f55..4c3684dd902e 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -937,8 +937,9 @@ static int create_component(struct vchiq_mmal_instance *instance,
/* build component create message */
m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
m.u.component_create.client_component = component->client_component;
- strncpy(m.u.component_create.name, name,
- sizeof(m.u.component_create.name));
+ strscpy_pad(m.u.component_create.name, name,
+ sizeof(m.u.component_create.name));
+ m.u.component_create.pid = 0;
ret = send_synchronous_mmal_msg(instance, &m,
sizeof(m.u.component_create),
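strscpy_pad() fixes two strncpy() hazards at once: a name exactly as long as the destination was left without a NUL, and shorter names left trailing buffer bytes uninitialized in a message handed to firmware. A compilable userspace model of the semantics (the real kernel helper returns -E2BIG on truncation):

#include <string.h>
#include <sys/types.h>

static ssize_t strscpy_pad_model(char *dst, const char *src, size_t size)
{
	size_t len;

	if (!size)
		return -7;			/* stands in for -E2BIG */
	len = strnlen(src, size - 1);
	memcpy(dst, src, len);
	memset(dst + len, 0, size - len);	/* NUL-terminate and pad */
	return src[len] ? -7 : (ssize_t)len;	/* flag truncation */
}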
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 679720021183..d9a6242264b7 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -583,7 +583,7 @@ int iscsit_dataout_datapduinorder_no_fbit(
struct iscsi_pdu *pdu)
{
int i, send_recovery_r2t = 0, recovery = 0;
- u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
+ u32 length = 0, offset = 0, pdu_count = 0;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *first_pdu = NULL;
@@ -596,7 +596,6 @@ int iscsit_dataout_datapduinorder_no_fbit(
if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
if (!first_pdu)
first_pdu = &cmd->pdu_list[i];
- xfer_len += cmd->pdu_list[i].length;
pdu_count++;
} else if (pdu_count)
break;
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c1fbcdd16182..c40217f44b1b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3672,6 +3672,8 @@ static int __init target_core_init_configfs(void)
{
struct configfs_subsystem *subsys = &target_core_fabrics;
struct t10_alua_lu_gp *lu_gp;
+ struct cred *kern_cred;
+ const struct cred *old_cred;
int ret;
pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
@@ -3748,11 +3750,21 @@ static int __init target_core_init_configfs(void)
if (ret < 0)
goto out;
+ /* We use the kernel credentials to access the target directory */
+ kern_cred = prepare_kernel_cred(&init_task);
+ if (!kern_cred) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ old_cred = override_creds(kern_cred);
target_init_dbroot();
+ revert_creds(old_cred);
+ put_cred(kern_cred);
return 0;
out:
+ target_xcopy_release_pt();
configfs_unregister_subsystem(subsys);
core_dev_release_virtual_lun0();
rd_module_exit();
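The configfs hunk above wraps one privileged filesystem access in a temporary credential override. The pattern in isolation, using the kernel cred API from <linux/cred.h>; privileged_fs_access() is a hypothetical stand-in for target_init_dbroot():

static int run_with_kernel_creds(void)
{
	struct cred *kcred;
	const struct cred *old;

	kcred = prepare_kernel_cred(&init_task);
	if (!kcred)
		return -ENOMEM;

	old = override_creds(kcred);	/* current now acts with kernel creds */
	privileged_fs_access();
	revert_creds(old);		/* restore the caller's creds */
	put_cred(kcred);		/* drop our reference */
	return 0;
}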
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 50dec24e967a..8fd7cf1932cd 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -214,7 +214,7 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
res = dfc->power_ops->get_real_power(df, power, freq, voltage);
if (!res) {
- state = dfc->capped_state;
+ state = dfc->max_state - dfc->capped_state;
/* Convert EM power into milli-Watts first */
rcu_read_lock();
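A quick arithmetic check of the conversion above, on my reading of the fix (hedged): cooling states count upward with throttling (state 0 means unthrottled), while capped_state indexes performance levels that count upward with frequency. With max_state == 4, a cap at the top performance level (capped_state == 4) must report cooling state 0, and a cap at the lowest level (capped_state == 0) must report state 4; max_state - capped_state gives exactly that inversion, where the old code passed the performance index through unconverted.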
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 1b17dc4c219c..e25e48d76aa7 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -606,7 +606,7 @@ static int allocate_actors_buffer(struct power_allocator_params *params,
/* There might be no cooling devices yet. */
if (!num_actors) {
- ret = -EINVAL;
+ ret = 0;
goto clean_state;
}
@@ -679,11 +679,6 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return -ENOMEM;
get_governor_trips(tz, params);
- if (!params->trip_max) {
- dev_warn(&tz->device, "power_allocator: missing trip_max\n");
- kfree(params);
- return -EINVAL;
- }
ret = check_power_actors(tz, params);
if (ret < 0) {
@@ -714,9 +709,10 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
else
params->sustainable_power = tz->tzp->sustainable_power;
- estimate_pid_constants(tz, tz->tzp->sustainable_power,
- params->trip_switch_on,
- params->trip_max->temperature);
+ if (params->trip_max)
+ estimate_pid_constants(tz, tz->tzp->sustainable_power,
+ params->trip_switch_on,
+ params->trip_max->temperature);
reset_pid_controller(params);
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index c617e8b9f0dd..d78d54ae2605 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -616,6 +616,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
tze->trip_stats[trip_id].timestamp = now;
tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
+ tze->trip_stats[trip_id].count++;
tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
(temperature - tze->trip_stats[trip_id].avg) /
tze->trip_stats[trip_id].count;
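The added count++ is a correctness fix, not bookkeeping: the incremental mean avg += (x - avg) / count must divide by the count that includes the new sample, otherwise the very first sample divides by zero. A standalone check:

#include <stdio.h>

int main(void)
{
	const double samples[] = { 40.0, 42.0, 47.0 };
	double avg = 0.0;
	unsigned int count = 0;

	for (int i = 0; i < 3; i++) {
		count++;			/* must precede the update */
		avg += (samples[i] - avg) / count;
	}
	printf("avg = %.2f\n", avg);		/* 43.00, the true mean */
	return 0;
}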
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index 09f6050dd041..497abf0d47ca 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -65,7 +65,6 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
{
const struct thermal_trip *trip;
int low = -INT_MAX, high = INT_MAX;
- bool same_trip = false;
int ret;
lockdep_assert_held(&tz->lock);
@@ -74,36 +73,22 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
return;
for_each_trip(tz, trip) {
- bool low_set = false;
int trip_low;
trip_low = trip->temperature - trip->hysteresis;
- if (trip_low < tz->temperature && trip_low > low) {
+ if (trip_low < tz->temperature && trip_low > low)
low = trip_low;
- low_set = true;
- same_trip = false;
- }
if (trip->temperature > tz->temperature &&
- trip->temperature < high) {
+ trip->temperature < high)
high = trip->temperature;
- same_trip = low_set;
- }
}
/* No need to change trip points */
if (tz->prev_low_trip == low && tz->prev_high_trip == high)
return;
- /*
- * If "high" and "low" are the same, skip the change unless this is the
- * first time.
- */
- if (same_trip && (tz->prev_low_trip != -INT_MAX ||
- tz->prev_high_trip != INT_MAX))
- return;
-
tz->prev_low_trip = low;
tz->prev_high_trip = high;
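With the special-casing removed, __thermal_zone_set_trips() is back to a plain window computation: the next low threshold is the highest trip_low below the current temperature, the next high is the lowest trip temperature above it. The core loop as a compilable sketch:

#include <limits.h>

struct trip { int temperature; int hysteresis; };

static void trip_window(const struct trip *t, int n, int cur,
			int *low, int *high)
{
	*low = -INT_MAX;
	*high = INT_MAX;

	for (int i = 0; i < n; i++) {
		int trip_low = t[i].temperature - t[i].hysteresis;

		if (trip_low < cur && trip_low > *low)
			*low = trip_low;
		if (t[i].temperature > cur && t[i].temperature < *high)
			*high = t[i].temperature;
	}
}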
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 6ffc4e81ffed..326433df5880 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -3180,22 +3180,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged)
- return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
+ /*
+ * Unconfigure downstream port so that wake-on-connect can be
+ * configured after router unplug. No need to unconfigure upstream port
+ * since its router is unplugged.
+ */
up = tb_upstream_port(sw);
- if (tb_switch_is_usb4(up->sw))
- usb4_port_unconfigure(up);
- else
- tb_lc_unconfigure_port(up);
-
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
+
+ if (sw->is_unplugged)
+ return;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ usb4_port_unconfigure(up);
+ else
+ tb_lc_unconfigure_port(up);
}
static void tb_switch_credits_init(struct tb_switch *sw)
@@ -3441,7 +3448,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
return tb_lc_set_wake(sw, flags);
}
-int tb_switch_resume(struct tb_switch *sw)
+static void tb_switch_check_wakes(struct tb_switch *sw)
+{
+ if (device_may_wakeup(&sw->dev)) {
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_check_wakes(sw);
+ }
+}
+
+/**
+ * tb_switch_resume() - Resume a switch after sleep
+ * @sw: Switch to resume
+ * @runtime: Is this resume from runtime suspend or system sleep
+ *
+ * Resumes and re-enumerates the router (and all its children) if it is
+ * still plugged in after suspend. Does not enumerate a device router whose
+ * UID changed during suspend. On resume from system sleep, notifies the PM
+ * core about wakes that occurred during suspend. Disables all wakes except
+ * the USB4 wake of the upstream port, which must always stay enabled for
+ * USB4 routers.
+ */
+int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
struct tb_port *port;
int err;
@@ -3490,6 +3516,9 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
+ if (!runtime)
+ tb_switch_check_wakes(sw);
+
/* Disable wakes */
tb_switch_set_wake(sw, 0);
@@ -3519,7 +3548,8 @@ int tb_switch_resume(struct tb_switch *sw)
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
- if (port->remote && tb_switch_resume(port->remote->sw)) {
+ if (port->remote &&
+ tb_switch_resume(port->remote->sw, runtime)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index c5ce7a694b27..3e44c78ac409 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1801,6 +1801,12 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;
}
+ /* Needs to be on different routers */
+ if (in->sw == port->sw) {
+ tb_port_dbg(port, "skipping DP OUT on same router\n");
+ continue;
+ }
+
tb_port_dbg(port, "DP OUT available\n");
/*
@@ -2936,7 +2942,7 @@ static int tb_resume_noirq(struct tb *tb)
if (!tb_switch_is_usb4(tb->root_switch))
tb_switch_reset(tb->root_switch);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, false);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
@@ -3062,7 +3068,7 @@ static int tb_runtime_resume(struct tb *tb)
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, true);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index feed8ecaf712..18aae4ccaed5 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -827,7 +827,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
-int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw, bool runtime);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec);
@@ -1288,6 +1288,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
return usb4_switch_version(sw) > 0;
}
+void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 9860b49d7a2b..78b06e922fda 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -155,7 +155,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
tx_dwords, rx_data, rx_dwords);
}
-static void usb4_switch_check_wakes(struct tb_switch *sw)
+/**
+ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
+ * @sw: Router whose wakes to check
+ *
+ * Checks for wakes that occurred during suspend and notifies the PM core about them.
+ */
+void usb4_switch_check_wakes(struct tb_switch *sw)
{
bool wakeup_usb4 = false;
struct usb4_port *usb4;
@@ -163,9 +169,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw)
bool wakeup = false;
u32 val;
- if (!device_may_wakeup(&sw->dev))
- return;
-
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
@@ -244,8 +247,6 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
- usb4_switch_check_wakes(sw);
-
if (!tb_route(sw))
return 0;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a3acbf0f5da1..1300c92b8702 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -356,9 +356,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
+ clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, newrate);
- if (rate > 0 && p->uartclk != rate) {
- clk_disable_unprepare(d->clk);
+ if (rate > 0) {
/*
* Note that any clock-notifer worker will block in
* serial8250_update_uartclk() until we are done.
@@ -366,8 +366,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, newrate);
if (!ret)
p->uartclk = rate;
- clk_prepare_enable(d->clk);
}
+ clk_prepare_enable(d->clk);
dw8250_do_set_termios(p, termios, old);
}
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 7984ee05af1d..47e1a056a60c 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -151,7 +151,7 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
ret = uart_read_port_properties(&uart.port);
if (ret)
- return ret;
+ goto dis_uart_clk;
uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 0d35c77fad9e..e2e4f99f9d34 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5010,12 +5010,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_bt_2_115200 },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_4_460800 },
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4749331fe618..1e8853eae504 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1086,11 +1086,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istat;
+ u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
- u32 stat = mxs_read(s, REG_STAT);
+ uart_port_lock(&s->port);
+
+ stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
@@ -1126,6 +1128,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
+ uart_port_unlock(&s->port);
+
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 05d97e89511e..92195f984de1 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_port *port;
unsigned char ch, r1, drop, flag;
- int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL) {
@@ -291,25 +290,12 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
if (r1 & Rx_OVR)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
- /* We can get stuck in an infinite loop getting char 0 when the
- * line is in a wrong HW state, we break that here.
- * When that happens, I disable the receive side of the driver.
- * Note that what I've been experiencing is a real irq loop where
- * I'm getting flooded regardless of the actual port speed.
- * Something strange is going on with the HW
- */
- if ((++loops) > 1000)
- goto flood;
ch = read_zsreg(uap, R0);
if (!(ch & Rx_CH_AV))
break;
}
return true;
- flood:
- pmz_interrupt_control(uap, 0);
- pmz_error("pmz: rx irq flood !\n");
- return true;
}
static void pmz_status_handle(struct uart_pmac_port *uap)
diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
index c74c548f0db6..b6c38d2edfd4 100644
--- a/drivers/tty/serial/serial_base.h
+++ b/drivers/tty/serial/serial_base.h
@@ -22,6 +22,7 @@ struct serial_ctrl_device {
struct serial_port_device {
struct device dev;
struct uart_port *port;
+ unsigned int tx_enabled:1;
};
int serial_base_ctrl_init(void);
@@ -30,6 +31,9 @@ void serial_base_ctrl_exit(void);
int serial_base_port_init(void);
void serial_base_port_exit(void);
+void serial_base_port_startup(struct uart_port *port);
+void serial_base_port_shutdown(struct uart_port *port);
+
int serial_base_driver_register(struct device_driver *driver);
void serial_base_driver_unregister(struct device_driver *driver);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index ff85ebd3a007..c476d884356d 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
* enabled, serial_port_runtime_resume() calls start_tx() again
* after enabling the device.
*/
- if (pm_runtime_active(&port_dev->dev))
+ if (!pm_runtime_enabled(port->dev) || pm_runtime_active(&port_dev->dev))
port->ops->start_tx(port);
pm_runtime_mark_last_busy(&port_dev->dev);
pm_runtime_put_autosuspend(&port_dev->dev);
@@ -323,16 +323,26 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
bool init_hw)
{
struct tty_port *port = &state->port;
+ struct uart_port *uport;
int retval;
if (tty_port_initialized(port))
- return 0;
+ goto out_base_port_startup;
retval = uart_port_startup(tty, state, init_hw);
- if (retval)
+ if (retval) {
set_bit(TTY_IO_ERROR, &tty->flags);
+ return retval;
+ }
- return retval;
+out_base_port_startup:
+ uport = uart_port_check(state);
+ if (!uport)
+ return -EIO;
+
+ serial_base_port_startup(uport);
+
+ return 0;
}
/*
@@ -355,6 +365,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
+ if (uport)
+ serial_base_port_shutdown(uport);
+
if (tty_port_initialized(port)) {
tty_port_set_initialized(port, false);
@@ -1775,6 +1788,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
uport->ops->stop_rx(uport);
uart_port_unlock_irq(uport);
+ serial_base_port_shutdown(uport);
uart_port_shutdown(port);
/*
@@ -1788,6 +1802,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
* Free the transmit buffer.
*/
uart_port_lock_irq(uport);
+ uart_circ_clear(&state->xmit);
buf = state->xmit.buf;
state->xmit.buf = NULL;
uart_port_unlock_irq(uport);
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 22b9eeb23e68..7e3a1c7b097c 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -39,8 +39,12 @@ static int serial_port_runtime_resume(struct device *dev)
/* Flush any pending TX for the port */
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled)
+ goto unlock;
if (__serial_port_busy(port))
port->ops->start_tx(port);
+
+unlock:
uart_port_unlock_irqrestore(port, flags);
out:
@@ -60,6 +64,11 @@ static int serial_port_runtime_suspend(struct device *dev)
return 0;
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled) {
+ uart_port_unlock_irqrestore(port, flags);
+ return 0;
+ }
+
busy = __serial_port_busy(port);
if (busy)
port->ops->start_tx(port);
@@ -71,6 +80,31 @@ static int serial_port_runtime_suspend(struct device *dev)
return busy ? -EBUSY : 0;
}
+static void serial_base_port_set_tx(struct uart_port *port,
+ struct serial_port_device *port_dev,
+ bool enabled)
+{
+ unsigned long flags;
+
+ uart_port_lock_irqsave(port, &flags);
+ port_dev->tx_enabled = enabled;
+ uart_port_unlock_irqrestore(port, flags);
+}
+
+void serial_base_port_startup(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, true);
+}
+
+void serial_base_port_shutdown(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, false);
+}
+
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
serial_port_runtime_suspend,
serial_port_runtime_resume, NULL);
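The new tx_enabled bit is only ever read or written under the port lock, which is what makes the suspend/resume checks race-free against startup/shutdown flipping it. The same discipline in a compilable userspace sketch, a pthread mutex standing in for the port lock:

#include <pthread.h>
#include <stdbool.h>

struct port { pthread_mutex_t lock; bool tx_enabled; };

static void port_set_tx(struct port *p, bool enabled)
{
	pthread_mutex_lock(&p->lock);
	p->tx_enabled = enabled;
	pthread_mutex_unlock(&p->lock);
}

static bool port_should_start_tx(struct port *p)
{
	bool tx;

	pthread_mutex_lock(&p->lock);
	tx = p->tx_enabled;	/* skip start_tx after shutdown */
	pthread_mutex_unlock(&p->lock);
	return tx;
}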
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 58d169e5c1db..4fa5a03ebac0 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -861,6 +861,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
unsigned int size;
+ irqreturn_t ret = IRQ_NONE;
sr = readl_relaxed(port->membase + ofs->isr);
@@ -869,11 +870,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
(sr & USART_SR_TC)) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_disable(port);
+ ret = IRQ_HANDLED;
}
- if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
+ ret = IRQ_HANDLED;
+ }
if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
/* Clear wake up flag and disable wake up interrupt */
@@ -882,6 +886,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
+ ret = IRQ_HANDLED;
}
/*
@@ -896,6 +901,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
}
@@ -903,6 +909,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_port_lock(port);
stm32_usart_transmit_chars(port);
uart_port_unlock(port);
+ ret = IRQ_HANDLED;
}
/* Receiver timeout irq for DMA RX */
@@ -912,9 +919,10 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return ret;
}
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1084,6 +1092,7 @@ static int stm32_usart_startup(struct uart_port *port)
val |= USART_CR2_SWAP;
writel_relaxed(val, port->membase + ofs->cr2);
}
+ stm32_port->throttled = false;
/* RX FIFO Flush */
if (ofs->rqr != UNDEF_REG)
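The stm32 change above follows the standard ISR contract: return IRQ_NONE unless a status bit was actually serviced, so the core's spurious-interrupt accounting can detect and disable a stuck line. The general shape, with a hypothetical device and helpers:

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_dev *dev = data;		/* hypothetical */
	irqreturn_t ret = IRQ_NONE;
	u32 sr = my_read_status(dev);

	if (sr & MY_EVENT_A) {
		my_handle_a(dev);
		ret = IRQ_HANDLED;
	}
	if (sr & MY_EVENT_B) {
		my_handle_b(dev);
		ret = IRQ_HANDLED;
	}
	return ret;	/* IRQ_NONE when nothing here fired */
}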
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 8db81f1a12d5..768bf87cd80d 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -94,7 +94,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
val &= ~MCQ_CFG_MAC_MASK;
- val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
+ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
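For the MAC fix above, my reading of the spec (hedged): the MaxActiveCmds field is zero-based, encoding the permitted command count minus one, so allowing 32 outstanding commands means programming FIELD_PREP(MCQ_CFG_MAC_MASK, 31). The old code over-programmed the field by one.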
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index e30fd125988d..a0f8e930167d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -3217,7 +3217,9 @@ retry:
/* MCQ mode */
if (is_mcq_enabled(hba)) {
- err = ufshcd_clear_cmd(hba, lrbp->task_tag);
+ /* Command cleared successfully; let the caller retry */
+ if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
+ err = -EAGAIN;
hba->dev_cmd.complete = NULL;
return err;
}
@@ -9791,7 +9793,10 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
- ret = -EINVAL;
+ /* Wait for the error handler to finish, or trigger error recovery */
+ if (!ufshcd_eh_in_progress(hba))
+ ufshcd_force_error_recovery(hba);
+ ret = -EBUSY;
goto enable_scaling;
}
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 8d68bd21ae73..7a00004bfd03 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -47,7 +47,7 @@ enum {
TSTBUS_MAX,
};
-#define QCOM_UFS_MAX_GEAR 4
+#define QCOM_UFS_MAX_GEAR 5
#define QCOM_UFS_MAX_LANE 2
enum {
@@ -67,26 +67,32 @@ static const struct __ufs_qcom_bw_table {
[MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 },
[MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_MAX][0][0] = { 7643136, 307200 },
};
@@ -1210,8 +1216,10 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) &&
- !strcmp(clki->name, "core_clk_unipro")) {
- if (is_scale_up)
+ !strcmp(clki->name, "core_clk_unipro")) {
+ if (!clki->max_freq)
+ cycles_in_1us = 150; /* default for backwards compatibility */
+ else if (is_scale_up)
cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
else
cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index bb77de6fa067..009158fef2a8 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -792,7 +792,7 @@ static int uio_mmap_dma_coherent(struct vm_area_struct *vma)
*/
vma->vm_pgoff = 0;
- addr = (void *)mem->addr;
+ addr = (void *)(uintptr_t)mem->addr;
ret = dma_mmap_coherent(mem->dma_device,
vma,
addr,
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index d5f9384df125..13cc35ab5d29 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -60,7 +60,7 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
&uiomem->dma_addr, GFP_KERNEL);
- uiomem->addr = addr ? (phys_addr_t) addr : DMEM_MAP_ERROR;
+ uiomem->addr = addr ? (uintptr_t) addr : DMEM_MAP_ERROR;
++uiomem;
}
priv->refcnt++;
@@ -89,7 +89,7 @@ static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
break;
if (uiomem->addr) {
dma_free_coherent(uiomem->dma_device, uiomem->size,
- (void *) uiomem->addr,
+ (void *) (uintptr_t) uiomem->addr,
uiomem->dma_addr);
}
uiomem->addr = DMEM_MAP_ERROR;
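The (uintptr_t) casts in the uio hunks make the integer-to-pointer width conversion explicit, which avoids cast warnings on 32-bit builds where the address field is wider than a pointer (the stored value is known to fit). Standalone form:

#include <stdint.h>

static void *field_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;	/* narrow via uintptr_t */
}

static uint64_t ptr_to_field(void *p)
{
	return (uintptr_t)p;		/* widen without sign surprises */
}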
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 20d9762331bd..6be3462b109f 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -181,12 +181,14 @@ hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
{
if (pdata->send_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
- vfree(pdata->send_buf);
+ if (!pdata->send_gpadl.decrypted)
+ vfree(pdata->send_buf);
}
if (pdata->recv_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
- vfree(pdata->recv_buf);
+ if (!pdata->recv_gpadl.decrypted)
+ vfree(pdata->recv_buf);
}
}
@@ -295,7 +297,8 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
RECV_BUFFER_SIZE, &pdata->recv_gpadl);
if (ret) {
- vfree(pdata->recv_buf);
+ if (!pdata->recv_gpadl.decrypted)
+ vfree(pdata->recv_buf);
goto fail_close;
}
@@ -317,7 +320,8 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_establish_gpadl(channel, pdata->send_buf,
SEND_BUFFER_SIZE, &pdata->send_gpadl);
if (ret) {
- vfree(pdata->send_buf);
+ if (!pdata->send_gpadl.decrypted)
+ vfree(pdata->send_buf);
goto fail_close;
}
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 72b33f7d4c40..f67881cba645 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -191,7 +191,7 @@ static int pruss_probe(struct platform_device *pdev)
p->mem[1].size = sram_pool_sz;
p->mem[1].memtype = UIO_MEM_PHYS;
- p->mem[2].addr = (phys_addr_t) gdev->ddr_vaddr;
+ p->mem[2].addr = (uintptr_t) gdev->ddr_vaddr;
p->mem[2].dma_addr = gdev->ddr_paddr;
p->mem[2].size = extram_pool_sz;
p->mem[2].memtype = UIO_MEM_DMA_COHERENT;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 3ee8455585b6..9446660e231b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -130,7 +130,6 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
#define HUB_DEBOUNCE_STEP 25
#define HUB_DEBOUNCE_STABLE 100
-static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
@@ -720,14 +719,14 @@ static void kick_hub_wq(struct usb_hub *hub)
*/
intf = to_usb_interface(hub->intfdev);
usb_autopm_get_interface_no_resume(intf);
- kref_get(&hub->kref);
+ hub_get(hub);
if (queue_work(hub_wq, &hub->events))
return;
/* the work has already been scheduled */
usb_autopm_put_interface_async(intf);
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
}
void usb_kick_hub_wq(struct usb_device *hdev)
@@ -1095,7 +1094,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
goto init2;
goto init3;
}
- kref_get(&hub->kref);
+ hub_get(hub);
/* The superspeed hub except for root hub has to use Hub Depth
* value as an offset into the route string to locate the bits
@@ -1343,7 +1342,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
device_unlock(&hdev->dev);
}
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
}
/* Implement the continuations for the delays above */
@@ -1759,6 +1758,16 @@ static void hub_release(struct kref *kref)
kfree(hub);
}
+void hub_get(struct usb_hub *hub)
+{
+ kref_get(&hub->kref);
+}
+
+void hub_put(struct usb_hub *hub)
+{
+ kref_put(&hub->kref, hub_release);
+}
+
static unsigned highspeed_hubs;
static void hub_disconnect(struct usb_interface *intf)
@@ -1807,7 +1816,7 @@ static void hub_disconnect(struct usb_interface *intf)
onboard_hub_destroy_pdevs(&hub->onboard_hub_devs);
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
}
static bool hub_descriptor_is_sane(struct usb_host_interface *desc)
@@ -5934,7 +5943,7 @@ out_hdev_lock:
/* Balance the stuff in kick_hub_wq() and allow autosuspend */
usb_autopm_put_interface(intf);
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
kcov_remote_stop();
}
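hub_get()/hub_put() wrap the kref so hub_release() can stay private to hub.c while other files (port.c, below) still take references. The generic wrapper pattern, with a hypothetical struct foo:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref kref;
	/* ... payload ... */
};

static void foo_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo, kref));
}

static inline void foo_get(struct foo *f)
{
	kref_get(&f->kref);
}

static inline void foo_put(struct foo *f)
{
	kref_put(&f->kref, foo_release);	/* frees on the last put */
}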
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 43ce21c96a51..183b69dc2955 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -129,6 +129,8 @@ extern void usb_hub_remove_port_device(struct usb_hub *hub,
extern int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
int port1, bool set);
extern struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev);
+extern void hub_get(struct usb_hub *hub);
+extern void hub_put(struct usb_hub *hub);
extern int hub_port_debounce(struct usb_hub *hub, int port1,
bool must_be_connected);
extern int usb_clear_port_feature(struct usb_device *hdev,
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 5b5e613a11e5..0e1262a077ae 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -56,11 +56,22 @@ static ssize_t disable_show(struct device *dev,
u16 portstatus, unused;
bool disabled;
int rc;
+ struct kernfs_node *kn;
+ hub_get(hub);
rc = usb_autopm_get_interface(intf);
if (rc < 0)
- return rc;
+ goto out_hub_get;
+ /*
+ * Prevent deadlock if another process is concurrently
+ * trying to unregister hdev.
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ if (!kn) {
+ rc = -ENODEV;
+ goto out_autopm;
+ }
usb_lock_device(hdev);
if (hub->disconnected) {
rc = -ENODEV;
@@ -70,9 +81,13 @@ static ssize_t disable_show(struct device *dev,
usb_hub_port_status(hub, port1, &portstatus, &unused);
disabled = !usb_port_is_power_on(hub, portstatus);
-out_hdev_lock:
+ out_hdev_lock:
usb_unlock_device(hdev);
+ sysfs_unbreak_active_protection(kn);
+ out_autopm:
usb_autopm_put_interface(intf);
+ out_hub_get:
+ hub_put(hub);
if (rc)
return rc;
@@ -90,15 +105,26 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
int port1 = port_dev->portnum;
bool disabled;
int rc;
+ struct kernfs_node *kn;
rc = kstrtobool(buf, &disabled);
if (rc)
return rc;
+ hub_get(hub);
rc = usb_autopm_get_interface(intf);
if (rc < 0)
- return rc;
+ goto out_hub_get;
+ /*
+ * Prevent deadlock if another process is concurrently
+ * trying to unregister hdev.
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ if (!kn) {
+ rc = -ENODEV;
+ goto out_autopm;
+ }
usb_lock_device(hdev);
if (hub->disconnected) {
rc = -ENODEV;
@@ -119,9 +145,13 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
if (!rc)
rc = count;
-out_hdev_lock:
+ out_hdev_lock:
usb_unlock_device(hdev);
+ sysfs_unbreak_active_protection(kn);
+ out_autopm:
usb_autopm_put_interface(intf);
+ out_hub_get:
+ hub_put(hub);
return rc;
}
@@ -419,8 +449,10 @@ static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
- if (port_dev->child)
+ if (port_dev->child) {
usb_disable_usb2_hardware_lpm(port_dev->child);
+ usb_unlocked_disable_lpm(port_dev->child);
+ }
}
static const struct dev_pm_ops usb_port_pm_ops = {
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index f98263e21c2a..d83231d6736a 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -1217,14 +1217,24 @@ static ssize_t interface_authorized_store(struct device *dev,
{
struct usb_interface *intf = to_usb_interface(dev);
bool val;
+ struct kernfs_node *kn;
if (kstrtobool(buf, &val) != 0)
return -EINVAL;
- if (val)
+ if (val) {
usb_authorize_interface(intf);
- else
- usb_deauthorize_interface(intf);
+ } else {
+ /*
+ * Prevent deadlock if another process is concurrently
+ * trying to unregister intf.
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ if (kn) {
+ usb_deauthorize_interface(intf);
+ sysfs_unbreak_active_protection(kn);
+ }
+ }
return count;
}
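The port.c and sysfs.c hunks above apply one recipe for sysfs callbacks that must take a lock the unregister path also holds while waiting for the attribute to go idle: break the attribute's active protection first, do the locked work, then restore it. Skeleton of the recipe; my_lock and do_update are hypothetical:

static ssize_t my_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct kernfs_node *kn;

	/* Prevent deadlock with a concurrent unregister of dev */
	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	if (!kn)
		return -ENODEV;		/* attribute already going away */

	mutex_lock(&my_lock);
	do_update(dev, buf, count);
	mutex_unlock(&my_lock);

	sysfs_unbreak_active_protection(kn);
	return count;
}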
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index c92a1da46a01..a141f83aba0c 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -729,8 +729,14 @@ struct dwc2_dregs_backup {
* struct dwc2_hregs_backup - Holds host registers state before
* entering partial power down
* @hcfg: Backup of HCFG register
+ * @hflbaddr: Backup of HFLBADDR register
* @haintmsk: Backup of HAINTMSK register
+ * @hcchar: Backup of HCCHAR register
+ * @hcsplt: Backup of HCSPLT register
* @hcintmsk: Backup of HCINTMSK register
+ * @hctsiz: Backup of HCTSIZ register
+ * @hcidma: Backup of HCDMA register
+ * @hcidmab: Backup of HCDMAB register
* @hprt0: Backup of HPRT0 register
* @hfir: Backup of HFIR register
* @hptxfsiz: Backup of HPTXFSIZ register
@@ -738,8 +744,14 @@ struct dwc2_dregs_backup {
*/
struct dwc2_hregs_backup {
u32 hcfg;
+ u32 hflbaddr;
u32 haintmsk;
+ u32 hcchar[MAX_EPS_CHANNELS];
+ u32 hcsplt[MAX_EPS_CHANNELS];
u32 hcintmsk[MAX_EPS_CHANNELS];
+ u32 hctsiz[MAX_EPS_CHANNELS];
+ u32 hcidma[MAX_EPS_CHANNELS];
+ u32 hcidmab[MAX_EPS_CHANNELS];
u32 hprt0;
u32 hfir;
u32 hptxfsiz;
@@ -1086,6 +1098,7 @@ struct dwc2_hsotg {
bool needs_byte_swap;
/* DWC OTG HW Release versions */
+#define DWC2_CORE_REV_4_30a 0x4f54430a
#define DWC2_CORE_REV_2_71a 0x4f54271a
#define DWC2_CORE_REV_2_72a 0x4f54272a
#define DWC2_CORE_REV_2_80a 0x4f54280a
@@ -1323,6 +1336,7 @@ int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg);
int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg);
void dwc2_enable_acg(struct dwc2_hsotg *hsotg);
+void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup);
/* This function should be called on every hardware interrupt. */
irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 158ede753854..26d752a4c3ca 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -297,7 +297,8 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
/* Exit gadget mode clock gating. */
if (hsotg->params.power_down ==
- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
dwc2_gadget_exit_clock_gating(hsotg, 0);
}
@@ -322,10 +323,11 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
* @hsotg: Programming view of DWC_otg controller
*
*/
-static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
+void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup)
{
u32 glpmcfg;
- u32 i = 0;
+ u32 pcgctl;
+ u32 dctl;
if (hsotg->lx_state != DWC2_L1) {
dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n");
@@ -334,37 +336,57 @@ static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (dwc2_is_device_mode(hsotg)) {
- dev_dbg(hsotg->dev, "Exit from L1 state\n");
+ dev_dbg(hsotg->dev, "Exit from L1 state, remotewakeup=%d\n", remotewakeup);
glpmcfg &= ~GLPMCFG_ENBLSLPM;
- glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;
+ glpmcfg &= ~GLPMCFG_HIRD_THRES_MASK;
dwc2_writel(hsotg, glpmcfg, GLPMCFG);
- do {
- glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl &= ~PCGCTL_ENBL_SLEEP_GATING;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
- if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK |
- GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS)))
- break;
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ if (glpmcfg & GLPMCFG_ENBESL) {
+ glpmcfg |= GLPMCFG_RSTRSLPSTS;
+ dwc2_writel(hsotg, glpmcfg, GLPMCFG);
+ }
+
+ if (remotewakeup) {
+ if (dwc2_hsotg_wait_bit_set(hsotg, GLPMCFG, GLPMCFG_L1RESUMEOK, 1000)) {
+ dev_warn(hsotg->dev, "%s: timeout GLPMCFG_L1RESUMEOK\n", __func__);
+ goto fail;
+ }
+
+ dctl = dwc2_readl(hsotg, DCTL);
+ dctl |= DCTL_RMTWKUPSIG;
+ dwc2_writel(hsotg, dctl, DCTL);
- udelay(1);
- } while (++i < 200);
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_WKUPINT, 1000)) {
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS_WKUPINT\n", __func__);
+ goto fail;
+ }
+ }
- if (i == 200) {
- dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n");
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ if (glpmcfg & GLPMCFG_COREL1RES_MASK || glpmcfg & GLPMCFG_SLPSTS ||
+ glpmcfg & GLPMCFG_L1RESUMEOK) {
+ goto fail;
-		return;
}
- dwc2_gadget_init_lpm(hsotg);
+
+ /* Inform gadget to exit from L1 */
+ call_gadget(hsotg, resume);
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
+ hsotg->bus_suspended = false;
+fail: dwc2_gadget_init_lpm(hsotg);
} else {
/* TODO */
dev_err(hsotg->dev, "Host side LPM is not supported.\n");
return;
}
-
- /* Change to L0 state */
- hsotg->lx_state = DWC2_L0;
-
- /* Inform gadget to exit from L1 */
- call_gadget(hsotg, resume);
}
/*
@@ -385,7 +407,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
if (hsotg->lx_state == DWC2_L1) {
- dwc2_wakeup_from_lpm_l1(hsotg);
+ dwc2_wakeup_from_lpm_l1(hsotg, false);
return;
}
@@ -408,7 +430,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
/* Exit gadget mode clock gating. */
if (hsotg->params.power_down ==
- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
dwc2_gadget_exit_clock_gating(hsotg, 0);
} else {
/* Change to L0 state */
@@ -425,7 +448,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
}
if (hsotg->params.power_down ==
- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
dwc2_host_exit_clock_gating(hsotg, 1);
/*
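The reworked dwc2_wakeup_from_lpm_l1() above replaces the old fixed 200 us poll with an explicit sequence: clear sleep gating, restart the sleep state machine when ENBESL is set, and, for a software-initiated wakeup, drive remote-wakeup signaling and wait for the wakeup interrupt. A condensed sketch of the remote-wakeup leg, using only helpers visible in the hunk and with error handling reduced to -ETIMEDOUT:

    /* Sketch only: condensed from the hunk above, not the driver's exact flow. */
    static int dwc2_l1_remote_wakeup_sketch(struct dwc2_hsotg *hsotg)
    {
        u32 dctl;

        /* The host must grant resume (L1ResumeOK) before we may signal. */
        if (dwc2_hsotg_wait_bit_set(hsotg, GLPMCFG, GLPMCFG_L1RESUMEOK, 1000))
            return -ETIMEDOUT;

        /* Request remote wakeup. */
        dctl = dwc2_readl(hsotg, DCTL);
        dctl |= DCTL_RMTWKUPSIG;
        dwc2_writel(hsotg, dctl, DCTL);

        /* The core raises WkUpInt once resume signaling completes. */
        if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_WKUPINT, 1000))
            return -ETIMEDOUT;

        return 0;
    }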
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index b517a7216de2..b2f6da5b65cc 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1415,6 +1415,10 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
ep->name, req, req->length, req->buf, req->no_interrupt,
req->zero, req->short_not_ok);
+ if (hs->lx_state == DWC2_L1) {
+ dwc2_wakeup_from_lpm_l1(hs, true);
+ }
+
/* Prevent new request submission when controller is suspended */
if (hs->lx_state != DWC2_L0) {
dev_dbg(hs->dev, "%s: submit request only in active state\n",
@@ -3727,6 +3731,12 @@ irq_retry:
if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
dwc2_exit_partial_power_down(hsotg, 0, true);
+ /* Exit gadget mode clock gating. */
+ if (hsotg->params.power_down ==
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+
hsotg->lx_state = DWC2_L0;
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 35c7a4df8e71..dd5b1c5691e1 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2701,8 +2701,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
hsotg->available_host_channels--;
}
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
- if (dwc2_assign_and_init_hc(hsotg, qh))
+ if (dwc2_assign_and_init_hc(hsotg, qh)) {
+ if (hsotg->params.uframe_sched)
+ hsotg->available_host_channels++;
break;
+ }
/*
* Move the QH from the periodic ready schedule to the
@@ -2735,8 +2738,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
hsotg->available_host_channels--;
}
- if (dwc2_assign_and_init_hc(hsotg, qh))
+ if (dwc2_assign_and_init_hc(hsotg, qh)) {
+ if (hsotg->params.uframe_sched)
+ hsotg->available_host_channels++;
break;
+ }
/*
* Move the QH from the non-periodic inactive schedule to the
@@ -4143,6 +4149,8 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
urb->actual_length);
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ if (!hsotg->params.dma_desc_enable)
+ urb->start_frame = qtd->qh->start_active_frame;
urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
for (i = 0; i < urb->number_of_packets; ++i) {
urb->iso_frame_desc[i].actual_length =
@@ -4649,7 +4657,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
}
if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
- hsotg->bus_suspended) {
+ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
if (dwc2_is_device_mode(hsotg))
dwc2_gadget_exit_clock_gating(hsotg, 0);
else
@@ -5406,9 +5414,16 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
/* Backup Host regs */
hr = &hsotg->hr_backup;
hr->hcfg = dwc2_readl(hsotg, HCFG);
+ hr->hflbaddr = dwc2_readl(hsotg, HFLBADDR);
hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
- for (i = 0; i < hsotg->params.host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i) {
+ hr->hcchar[i] = dwc2_readl(hsotg, HCCHAR(i));
+ hr->hcsplt[i] = dwc2_readl(hsotg, HCSPLT(i));
hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));
+ hr->hctsiz[i] = dwc2_readl(hsotg, HCTSIZ(i));
+ hr->hcidma[i] = dwc2_readl(hsotg, HCDMA(i));
+ hr->hcidmab[i] = dwc2_readl(hsotg, HCDMAB(i));
+ }
hr->hprt0 = dwc2_read_hprt0(hsotg);
hr->hfir = dwc2_readl(hsotg, HFIR);
@@ -5442,10 +5457,17 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
hr->valid = false;
dwc2_writel(hsotg, hr->hcfg, HCFG);
+ dwc2_writel(hsotg, hr->hflbaddr, HFLBADDR);
dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);
- for (i = 0; i < hsotg->params.host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i) {
+ dwc2_writel(hsotg, hr->hcchar[i], HCCHAR(i));
+ dwc2_writel(hsotg, hr->hcsplt[i], HCSPLT(i));
dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));
+ dwc2_writel(hsotg, hr->hctsiz[i], HCTSIZ(i));
+ dwc2_writel(hsotg, hr->hcidma[i], HCDMA(i));
+ dwc2_writel(hsotg, hr->hcidmab[i], HCDMAB(i));
+ }
dwc2_writel(hsotg, hr->hprt0, HPRT0);
dwc2_writel(hsotg, hr->hfir, HFIR);
@@ -5610,10 +5632,12 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
dwc2_writel(hsotg, hr->hcfg, HCFG);
/* De-assert Wakeup Logic */
- gpwrdn = dwc2_readl(hsotg, GPWRDN);
- gpwrdn &= ~GPWRDN_PMUACTV;
- dwc2_writel(hsotg, gpwrdn, GPWRDN);
- udelay(10);
+ if (!(rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+ }
hprt0 = hr->hprt0;
hprt0 |= HPRT0_PWR;
@@ -5638,6 +5662,13 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
hprt0 |= HPRT0_RES;
dwc2_writel(hsotg, hprt0, HPRT0);
+ /* De-assert Wakeup Logic */
+ if ((rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+ }
/* Wait for Resume time and then program HPRT again */
mdelay(100);
hprt0 &= ~HPRT0_RES;
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 6b4d825e97a2..994a78ad084b 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -559,7 +559,7 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
idx = qh->td_last;
inc = qh->host_interval;
hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
- cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+ cur_idx = idx;
next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
/*
@@ -866,20 +866,27 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
{
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
+ u16 frame_desc_idx;
+ struct urb *usb_urb;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
+ usb_urb = qtd->urb->priv;
+
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[idx];
+ frame_desc_idx = (idx - qtd->isoc_td_first) & (usb_urb->number_of_packets - 1);
- frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
+ frame_desc = &qtd->urb->iso_descs[frame_desc_idx];
+ if (idx == qtd->isoc_td_first)
+ usb_urb->start_frame = dwc2_hcd_get_frame_number(hsotg);
dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
if (chan->ep_is_in)
remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
@@ -900,7 +907,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
frame_desc->status = 0;
}
- if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
+ if (++qtd->isoc_frame_index == usb_urb->number_of_packets) {
/*
* urb->status is not used for isoc transfers here. The
* individual frame_desc status are used instead.
@@ -1005,11 +1012,11 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
return;
idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
chan->speed);
- if (!rc)
+ if (rc == 0)
continue;
- if (rc == DWC2_CMPL_DONE)
- break;
+ if (rc == DWC2_CMPL_DONE || rc == DWC2_CMPL_STOP)
+ goto stop_scan;
/* rc == DWC2_CMPL_STOP */
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 13abdd5f6752..12f8c7f86dc9 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -698,7 +698,7 @@
#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25)
#define TXSTS_QTOP_TOKEN_SHIFT 25
#define TXSTS_QTOP_TERMINATE BIT(24)
-#define TXSTS_QSPCAVAIL_MASK (0xff << 16)
+#define TXSTS_QSPCAVAIL_MASK (0x7f << 16)
#define TXSTS_QSPCAVAIL_SHIFT 16
#define TXSTS_FSPCAVAIL_MASK (0xffff << 0)
#define TXSTS_FSPCAVAIL_SHIFT 0
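The mask change above matters because the queue-space field occupies bits 22:16, i.e. seven bits; the old 0xff mask also took in bit 23, which the fix implies is not part of the field, inflating the reported queue space. A standalone demonstration (the sample register value is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    #define TXSTS_QSPCAVAIL_MASK	(0x7fu << 16)	/* bits 22:16 */
    #define TXSTS_QSPCAVAIL_SHIFT	16

    int main(void)
    {
        /* Hypothetical value: bit 23 set by a neighbouring field, 0x40 queue slots. */
        uint32_t txsts = (1u << 23) | (0x40u << 16);

        printf("qspcavail = %u\n",
               (unsigned)((txsts & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT));
        /* Prints 64; the old 0xff mask would have reported 192. */
        return 0;
    }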
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index b1d48019e944..7b84416dfc2b 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -331,7 +331,7 @@ static void dwc2_driver_remove(struct platform_device *dev)
/* Exit clock gating when driver is removed. */
if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
- hsotg->bus_suspended) {
+ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
if (dwc2_is_device_mode(hsotg))
dwc2_gadget_exit_clock_gating(hsotg, 0);
else
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 3e55838c0001..31684cdaaae3 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1519,6 +1519,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
else
dwc->sysdev = dwc->dev;
+ dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
+
ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
if (ret >= 0) {
dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index c07edfc954f7..7e80dd3d466b 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1133,6 +1133,7 @@ struct dwc3_scratchpad_array {
* 3 - Reserved
* @dis_metastability_quirk: set to disable metastability quirk.
* @dis_split_quirk: set to disable split boundary.
+ * @sys_wakeup: set if the device may do system wakeup.
* @wakeup_configured: set if the device is configured for remote wakeup.
* @suspended: set to track suspend event due to U3/L2.
* @imod_interval: set the interrupt moderation interval in 250ns
@@ -1357,6 +1358,7 @@ struct dwc3 {
unsigned dis_split_quirk:1;
unsigned async_callbacks:1;
+ unsigned sys_wakeup:1;
unsigned wakeup_configured:1;
unsigned suspended:1;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 39564e17f3b0..497deed38c0c 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -51,7 +51,6 @@
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
-#define PCI_DEVICE_ID_INTEL_ARLH 0x7ec1
#define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -423,7 +422,6 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, MTLP, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTL, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
- { PCI_DEVICE_DATA(INTEL, ARLH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 72bb722da2f2..d96ffbe52039 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -226,7 +226,8 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
/* reinitialize physical ep1 */
dep = dwc->eps[1];
- dep->flags = DWC3_EP_ENABLED;
+ dep->flags &= DWC3_EP_RESOURCE_ALLOCATED;
+ dep->flags |= DWC3_EP_ENABLED;
/* stall is always issued on EP0 */
dep = dwc->eps[0];
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 40c52dbc28d3..4df2661f6675 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2955,6 +2955,9 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc->gadget_driver = driver;
spin_unlock_irqrestore(&dwc->lock, flags);
+ if (dwc->sys_wakeup)
+ device_wakeup_enable(dwc->sysdev);
+
return 0;
}
@@ -2970,6 +2973,9 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
+ if (dwc->sys_wakeup)
+ device_wakeup_disable(dwc->sysdev);
+
spin_lock_irqsave(&dwc->lock, flags);
dwc->gadget_driver = NULL;
dwc->max_cfg_eps = 0;
@@ -4651,6 +4657,10 @@ int dwc3_gadget_init(struct dwc3 *dwc)
else
dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
+ /* No system wakeup if no gadget driver bound */
+ if (dwc->sys_wakeup)
+ device_wakeup_disable(dwc->sysdev);
+
return 0;
err5:
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 5a5cb6ce9946..0204787df81d 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -173,6 +173,14 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err;
}
+ if (dwc->sys_wakeup) {
+ /* Restore wakeup setting if switched from device */
+ device_wakeup_enable(dwc->sysdev);
+
+ /* Pass on wakeup setting to the new xhci platform device */
+ device_init_wakeup(&xhci->dev, true);
+ }
+
return 0;
err:
platform_device_put(xhci);
@@ -181,6 +189,9 @@ err:
void dwc3_host_exit(struct dwc3 *dwc)
{
+ if (dwc->sys_wakeup)
+ device_init_wakeup(&dwc->xhci->dev, false);
+
platform_device_unregister(dwc->xhci);
dwc->xhci = NULL;
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index bffbc1dc651f..f855f1fc8e5e 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -46,6 +46,8 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
+#define DMABUF_ENQUEUE_TIMEOUT_MS 5000
+
MODULE_IMPORT_NS(DMA_BUF);
/* Reference counter handling */
@@ -1578,10 +1580,13 @@ static int ffs_dmabuf_transfer(struct file *file,
struct ffs_dmabuf_priv *priv;
struct ffs_dma_fence *fence;
struct usb_request *usb_req;
+ enum dma_resv_usage resv_dir;
struct dma_buf *dmabuf;
+ unsigned long timeout;
struct ffs_ep *ep;
bool cookie;
u32 seqno;
+ long retl;
int ret;
if (req->flags & ~USB_FFS_DMABUF_TRANSFER_MASK)
@@ -1615,17 +1620,14 @@ static int ffs_dmabuf_transfer(struct file *file,
goto err_attachment_put;
/* Make sure we don't have writers */
- if (!dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_WRITE)) {
- pr_vdebug("FFS WRITE fence is not signaled\n");
- ret = -EBUSY;
- goto err_resv_unlock;
- }
-
- /* If we're writing to the DMABUF, make sure we don't have readers */
- if (epfile->in &&
- !dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_READ)) {
- pr_vdebug("FFS READ fence is not signaled\n");
- ret = -EBUSY;
+ timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
+ retl = dma_resv_wait_timeout(dmabuf->resv,
+ dma_resv_usage_rw(epfile->in),
+ true, timeout);
+ if (retl == 0)
+ retl = -EBUSY;
+ if (retl < 0) {
+ ret = (int)retl;
goto err_resv_unlock;
}
@@ -1665,8 +1667,9 @@ static int ffs_dmabuf_transfer(struct file *file,
dma_fence_init(&fence->base, &ffs_dmabuf_fence_ops,
&priv->lock, priv->context, seqno);
- dma_resv_add_fence(dmabuf->resv, &fence->base,
- dma_resv_usage_rw(epfile->in));
+ resv_dir = epfile->in ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
+
+ dma_resv_add_fence(dmabuf->resv, &fence->base, resv_dir);
dma_resv_unlock(dmabuf->resv);
/* Now that the dma_fence is in place, queue the transfer. */
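The f_fs rework above swaps a hard -EBUSY for dma_resv_wait_timeout(), so blocking callers wait up to DMABUF_ENQUEUE_TIMEOUT_MS for outstanding fences rather than failing immediately, while nonblocking callers pass a zero timeout and keep the old immediate-failure behaviour. Note the asymmetry the hunk preserves: dma_resv_usage_rw() selects which fences to wait on, but the new fence is added with an explicit READ or WRITE usage. A sketch of the wait convention (return value: greater than 0 means time remaining, 0 means timed out, negative is an error):

    long retl;
    unsigned long timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);

    retl = dma_resv_wait_timeout(dmabuf->resv,
                                 dma_resv_usage_rw(epfile->in),
                                 true,	/* interruptible */
                                 timeout);
    if (retl == 0)
        retl = -EBUSY;		/* fences still pending at timeout */
    if (retl < 0)
        return (int)retl;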
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 28f4e6552e84..0acc32ed9960 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -878,7 +878,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (alt > 1)
goto fail;
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
DBG(cdev, "reset ncm\n");
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
@@ -1367,7 +1367,7 @@ static void ncm_disable(struct usb_function *f)
DBG(cdev, "ncm deactivated\n");
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 9d4150124fdb..b3a9d18a8dcd 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -292,7 +292,9 @@ int usb_ep_queue(struct usb_ep *ep,
{
int ret = 0;
- if (WARN_ON_ONCE(!ep->enabled && ep->address)) {
+ if (!ep->enabled && ep->address) {
+ pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n",
+ ep->address, ep->name);
ret = -ESHUTDOWN;
goto out;
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index e82d03224f94..3432ebfae978 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -868,7 +868,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
struct fsl_req *req = container_of(_req, struct fsl_req, req);
- struct fsl_udc *udc;
+ struct fsl_udc *udc = ep->udc;
unsigned long flags;
int ret;
@@ -878,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
dev_vdbg(&udc->gadget.dev, "%s, bad params\n", __func__);
return -EINVAL;
}
- if (unlikely(!_ep || !ep->ep.desc)) {
+ if (unlikely(!ep->ep.desc)) {
dev_vdbg(&udc->gadget.dev, "%s, bad ep\n", __func__);
return -EINVAL;
}
@@ -887,7 +887,6 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
return -EMSGSIZE;
}
- udc = ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 52278afea94b..575f0fd9c9f1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3133,7 +3133,7 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- irqreturn_t ret = IRQ_NONE;
+ irqreturn_t ret = IRQ_HANDLED;
u32 status;
spin_lock(&xhci->lock);
@@ -3141,12 +3141,13 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
status = readl(&xhci->op_regs->status);
if (status == ~(u32)0) {
xhci_hc_died(xhci);
- ret = IRQ_HANDLED;
goto out;
}
- if (!(status & STS_EINT))
+ if (!(status & STS_EINT)) {
+ ret = IRQ_NONE;
goto out;
+ }
if (status & STS_HCE) {
xhci_warn(xhci, "WARNING: Host Controller Error\n");
@@ -3156,7 +3157,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
- ret = IRQ_HANDLED;
goto out;
}
@@ -3167,7 +3167,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
*/
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
- ret = IRQ_HANDLED;
/* This is the handler of the primary interrupter */
xhci_handle_events(xhci, xhci->interrupters[0]);
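Flipping the default return to IRQ_HANDLED lets the xhci change above drop the per-path assignments: only the "interrupt not pending" case reports IRQ_NONE, so the IRQ core can still detect a misbehaving shared line, while every path that actually touched the hardware, including the error ones, stays IRQ_HANDLED. The general shape, as a sketch with hypothetical device and bit names:

    struct my_dev {				/* hypothetical device type */
        void __iomem *status_reg;
    };

    #define MY_IRQ_PENDING	BIT(3)		/* hypothetical status bit */

    static irqreturn_t my_irq(int irq, void *dev_id)
    {
        struct my_dev *dev = dev_id;
        u32 status = readl(dev->status_reg);

        if (!(status & MY_IRQ_PENDING))
            return IRQ_NONE;		/* shared line: not our interrupt */

        writel(status, dev->status_reg);	/* ack what we saw */
        return IRQ_HANDLED;			/* we owned the event, even on errors */
    }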
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 1740000d54c2..5762564b9d73 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -172,8 +172,7 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__field(void *, vdev)
__field(unsigned long long, out_ctx)
__field(unsigned long long, in_ctx)
- __field(int, hcd_portnum)
- __field(int, hw_portnum)
+ __field(int, slot_id)
__field(u16, current_mel)
),
@@ -181,13 +180,12 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__entry->vdev = vdev;
__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
- __entry->hcd_portnum = (int) vdev->rhub_port->hcd_portnum;
- __entry->hw_portnum = (int) vdev->rhub_port->hw_portnum;
+ __entry->slot_id = (int) vdev->slot_id;
__entry->current_mel = (u16) vdev->current_mel;
),
- TP_printk("vdev %p ctx %llx | %llx hcd_portnum %d hw_portnum %d current_mel %d",
- __entry->vdev, __entry->in_ctx, __entry->out_ctx,
- __entry->hcd_portnum, __entry->hw_portnum, __entry->current_mel
+ TP_printk("vdev %p slot %d ctx %llx | %llx current_mel %d",
+ __entry->vdev, __entry->slot_id, __entry->in_ctx,
+ __entry->out_ctx, __entry->current_mel
)
);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index c6101ed2d9d4..d8049275a023 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -78,7 +78,7 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
if (err) {
dev_err(hub->dev, "failed to enable supplies: %pe\n", ERR_PTR(err));
- return err;
+ goto disable_clk;
}
fsleep(hub->pdata->reset_us);
@@ -87,6 +87,10 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
hub->is_powered_on = true;
return 0;
+
+disable_clk:
+ clk_disable_unprepare(hub->clk);
+ return err;
}
static int onboard_hub_power_off(struct onboard_hub *hub)
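The onboard_hub change is the usual kernel unwind idiom: the clock is already prepared when regulator_bulk_enable() runs, so a failure there must release it, and the direct return leaked the clock reference. A condensed sketch of the fixed flow, assuming the clock is enabled first as in the driver:

    static int onboard_hub_power_on_sketch(struct onboard_hub *hub)
    {
        int err;

        err = clk_prepare_enable(hub->clk);
        if (err)
            return err;		/* nothing to unwind yet */

        err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
        if (err)
            goto disable_clk;	/* unwind in reverse order */

        hub->is_powered_on = true;
        return 0;

    disable_clk:
        clk_disable_unprepare(hub->clk);
        return err;
    }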
diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c
index 35770e608c64..2d30fc1be306 100644
--- a/drivers/usb/misc/usb-ljca.c
+++ b/drivers/usb/misc/usb-ljca.c
@@ -518,8 +518,10 @@ static int ljca_new_client_device(struct ljca_adapter *adap, u8 type, u8 id,
int ret;
client = kzalloc(sizeof *client, GFP_KERNEL);
- if (!client)
+ if (!client) {
+ kfree(data);
return -ENOMEM;
+ }
client->type = type;
client->id = id;
@@ -535,8 +537,10 @@ static int ljca_new_client_device(struct ljca_adapter *adap, u8 type, u8 id,
auxdev->dev.release = ljca_auxdev_release;
ret = auxiliary_device_init(auxdev);
- if (ret)
+ if (ret) {
+ kfree(data);
goto err_free;
+ }
ljca_auxdev_acpi_bind(adap, auxdev, adr, id);
@@ -590,12 +594,8 @@ static int ljca_enumerate_gpio(struct ljca_adapter *adap)
valid_pin[i] = get_unaligned_le32(&desc->bank_desc[i].valid_pins);
bitmap_from_arr32(gpio_info->valid_pin_map, valid_pin, gpio_num);
- ret = ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",
+ return ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",
gpio_info, LJCA_GPIO_ACPI_ADR);
- if (ret)
- kfree(gpio_info);
-
- return ret;
}
static int ljca_enumerate_i2c(struct ljca_adapter *adap)
@@ -629,10 +629,8 @@ static int ljca_enumerate_i2c(struct ljca_adapter *adap)
ret = ljca_new_client_device(adap, LJCA_CLIENT_I2C, i,
"ljca-i2c", i2c_info,
LJCA_I2C1_ACPI_ADR + i);
- if (ret) {
- kfree(i2c_info);
+ if (ret)
return ret;
- }
}
return 0;
@@ -669,10 +667,8 @@ static int ljca_enumerate_spi(struct ljca_adapter *adap)
ret = ljca_new_client_device(adap, LJCA_CLIENT_SPI, i,
"ljca-spi", spi_info,
LJCA_SPI1_ACPI_ADR + i);
- if (ret) {
- kfree(spi_info);
+ if (ret)
return ret;
- }
}
return 0;
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 8f735a86cd19..fdcffebf415c 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -262,13 +262,6 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
"could not get vbus regulator\n");
- nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
- if (PTR_ERR(nop->vbus_draw) == -ENODEV)
- nop->vbus_draw = NULL;
- if (IS_ERR(nop->vbus_draw))
- return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
- "could not get vbus regulator\n");
-
nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 55a65d941ccb..8a5846d4adf6 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EM060K_128 0x0128
+#define QUECTEL_PRODUCT_EM060K_129 0x0129
+#define QUECTEL_PRODUCT_EM060K_12a 0x012a
+#define QUECTEL_PRODUCT_EM060K_12b 0x012b
+#define QUECTEL_PRODUCT_EM060K_12c 0x012c
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
.driver_info = RSVD(4) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
+ .driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 71ace274761f..08953f0d4532 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -533,7 +533,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
* daft to me.
*/
-static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
+static int uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
{
struct uas_dev_info *devinfo = cmnd->device->hostdata;
struct urb *urb;
@@ -541,30 +541,28 @@ static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
urb = uas_alloc_sense_urb(devinfo, gfp, cmnd);
if (!urb)
- return NULL;
+ return -ENOMEM;
usb_anchor_urb(urb, &devinfo->sense_urbs);
err = usb_submit_urb(urb, gfp);
if (err) {
usb_unanchor_urb(urb);
uas_log_cmd_state(cmnd, "sense submit err", err);
usb_free_urb(urb);
- return NULL;
}
- return urb;
+ return err;
}
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo)
{
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
- struct urb *urb;
int err;
lockdep_assert_held(&devinfo->lock);
if (cmdinfo->state & SUBMIT_STATUS_URB) {
- urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
- if (!urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ err = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
+ if (err)
+ return err;
cmdinfo->state &= ~SUBMIT_STATUS_URB;
}
@@ -572,7 +570,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
cmnd, DMA_FROM_DEVICE);
if (!cmdinfo->data_in_urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return -ENOMEM;
cmdinfo->state &= ~ALLOC_DATA_IN_URB;
}
@@ -582,7 +580,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (err) {
usb_unanchor_urb(cmdinfo->data_in_urb);
uas_log_cmd_state(cmnd, "data in submit err", err);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return err;
}
cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
cmdinfo->state |= DATA_IN_URB_INFLIGHT;
@@ -592,7 +590,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
cmnd, DMA_TO_DEVICE);
if (!cmdinfo->data_out_urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return -ENOMEM;
cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
}
@@ -602,7 +600,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (err) {
usb_unanchor_urb(cmdinfo->data_out_urb);
uas_log_cmd_state(cmnd, "data out submit err", err);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return err;
}
cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
@@ -611,7 +609,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (cmdinfo->state & ALLOC_CMD_URB) {
cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd);
if (!cmdinfo->cmd_urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return -ENOMEM;
cmdinfo->state &= ~ALLOC_CMD_URB;
}
@@ -621,7 +619,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (err) {
usb_unanchor_urb(cmdinfo->cmd_urb);
uas_log_cmd_state(cmnd, "cmd submit err", err);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return err;
}
cmdinfo->cmd_urb = NULL;
cmdinfo->state &= ~SUBMIT_CMD_URB;
@@ -698,7 +696,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd)
* of queueing, no matter how fatal the error
*/
if (err == -ENODEV) {
- set_host_byte(cmnd, DID_ERROR);
+ set_host_byte(cmnd, DID_NO_CONNECT);
scsi_done(cmnd);
goto zombie;
}
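With uas_submit_urbs() now returning real errnos instead of SCSI_MLQUEUE_DEVICE_BUSY, the caller can distinguish a vanished device from a transient shortage. A sketch of the dispatch this enables; the driver's exact queuecommand flow differs slightly:

    err = uas_submit_urbs(cmnd, devinfo);
    if (err == -ENODEV) {
        /* Device is gone: complete the command without retrying. */
        set_host_byte(cmnd, DID_NO_CONNECT);
        scsi_done(cmnd);
        return 0;
    }
    if (err)
        return SCSI_MLQUEUE_DEVICE_BUSY;	/* transient: let SCSI requeue */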
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 389c7f0b8d93..9610e647a8d4 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1310,6 +1310,7 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
{
struct typec_port *port = to_typec_port(dev);
struct usb_power_delivery *pd;
+ int ret;
if (!port->ops || !port->ops->pd_set)
return -EOPNOTSUPP;
@@ -1318,7 +1319,11 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
if (!pd)
return -EINVAL;
- return port->ops->pd_set(port, pd);
+ ret = port->ops->pd_set(port, pd);
+ if (ret)
+ return ret;
+
+ return size;
}
static ssize_t select_usb_power_delivery_show(struct device *dev,
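The class.c fix above is about the sysfs ->store() contract: the callback must return the number of bytes consumed or a negative errno. pd_set() returns 0 on success, and passing that through makes write(2) report zero bytes written, which userspace typically treats as "try again". A sketch of the convention, with a hypothetical helper:

    static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t size)
    {
        int ret = foo_update(dev, buf);	/* hypothetical helper, 0 on success */

        if (ret)
            return ret;	/* negative errno reaches the writer */

        return size;	/* success: report all input consumed */
    }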
diff --git a/drivers/usb/typec/mux/it5205.c b/drivers/usb/typec/mux/it5205.c
index 5535932e42cd..4357cc67a867 100644
--- a/drivers/usb/typec/mux/it5205.c
+++ b/drivers/usb/typec/mux/it5205.c
@@ -22,7 +22,7 @@
#include <linux/usb/typec_mux.h>
#define IT5205_REG_CHIP_ID(x) (0x4 + (x))
-#define IT5205FN_CHIP_ID 0x35323035 /* "5205" */
+#define IT5205FN_CHIP_ID 0x35303235 /* "5025" -> "5205" */
/* MUX power down register */
#define IT5205_REG_MUXPDR 0x10
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index ae2b6c94482d..ab6ed6111ed0 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -6855,14 +6855,14 @@ static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
if (data->sink_desc.pdo[0]) {
for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
port->snk_pdo[i] = data->sink_desc.pdo[i];
- port->nr_snk_pdo = i + 1;
+ port->nr_snk_pdo = i;
port->operating_snk_mw = data->operating_snk_mw;
}
if (data->source_desc.pdo[0]) {
for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
- port->snk_pdo[i] = data->source_desc.pdo[i];
- port->nr_src_pdo = i + 1;
+ port->src_pdo[i] = data->source_desc.pdo[i];
+ port->nr_src_pdo = i;
}
switch (port->state) {
@@ -6910,7 +6910,9 @@ static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
port->port_source_caps = data->source_cap;
port->port_sink_caps = data->sink_cap;
+ typec_port_set_usb_power_delivery(p, NULL);
port->selected_pd = pd;
+ typec_port_set_usb_power_delivery(p, port->selected_pd);
unlock:
mutex_unlock(&port->lock);
return ret;
@@ -6943,9 +6945,7 @@ static void tcpm_port_unregister_pd(struct tcpm_port *port)
port->port_source_caps = NULL;
for (i = 0; i < port->pd_count; i++) {
usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
- kfree(port->pd_list[i]->sink_cap);
usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
- kfree(port->pd_list[i]->source_cap);
devm_kfree(port->dev, port->pd_list[i]);
port->pd_list[i] = NULL;
usb_power_delivery_unregister(port->pds[i]);
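The PDO-counting fix above is a plain off-by-one: the copy loop exits with i already equal to the number of entries copied, so the old i + 1 overstated the count (the same hunk also fixes a copy of source PDOs into the sink array). A standalone demonstration with hypothetical PDO values:

    #include <stdio.h>

    #define PDO_MAX_OBJECTS 7

    int main(void)
    {
        unsigned int pdo[PDO_MAX_OBJECTS] = { 0x1912c, 0x2d12c, 0 };	/* hypothetical */
        unsigned int copy[PDO_MAX_OBJECTS];
        int i;

        for (i = 0; i < PDO_MAX_OBJECTS && pdo[i]; i++)
            copy[i] = pdo[i];

        /* Prints 2: the loop leaves i at the element count, no +1 needed. */
        printf("nr_pdo = %d, last = %#x\n", i, copy[i - 1]);
        return 0;
    }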
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index cf52cb34d285..bd6ae92aa39e 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -151,8 +151,12 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
return -EIO;
- if (cci & UCSI_CCI_NOT_SUPPORTED)
+ if (cci & UCSI_CCI_NOT_SUPPORTED) {
+ if (ucsi_acknowledge_command(ucsi) < 0)
+ dev_err(ucsi->dev,
+ "ACK of unsupported command failed\n");
return -EOPNOTSUPP;
+ }
if (cci & UCSI_CCI_ERROR) {
if (cmd == UCSI_GET_ERROR_STATUS)
@@ -1133,17 +1137,21 @@ static int ucsi_check_cable(struct ucsi_connector *con)
if (ret < 0)
return ret;
- ret = ucsi_get_cable_identity(con);
- if (ret < 0)
- return ret;
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE) {
+ ret = ucsi_get_cable_identity(con);
+ if (ret < 0)
+ return ret;
+ }
- ret = ucsi_register_plug(con);
- if (ret < 0)
- return ret;
+ if (con->ucsi->cap.features & UCSI_CAP_ALT_MODE_DETAILS) {
+ ret = ucsi_register_plug(con);
+ if (ret < 0)
+ return ret;
- ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP_P);
- if (ret < 0)
- return ret;
+ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP_P);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
@@ -1189,8 +1197,10 @@ static void ucsi_handle_connector_change(struct work_struct *work)
ucsi_register_partner(con);
ucsi_partner_task(con, ucsi_check_connection, 1, HZ);
ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ);
- ucsi_partner_task(con, ucsi_get_partner_identity, 1, HZ);
- ucsi_partner_task(con, ucsi_check_cable, 1, HZ);
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
+ ucsi_partner_task(con, ucsi_get_partner_identity, 1, HZ);
+ if (con->ucsi->cap.features & UCSI_CAP_CABLE_DETAILS)
+ ucsi_partner_task(con, ucsi_check_cable, 1, HZ);
if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
UCSI_CONSTAT_PWR_OPMODE_PD)
@@ -1215,11 +1225,11 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.change & UCSI_CONSTAT_CAM_CHANGE)
ucsi_partner_task(con, ucsi_check_altmodes, 1, 0);
- clear_bit(EVENT_PENDING, &con->ucsi->flags);
-
mutex_lock(&ucsi->ppm_lock);
+ clear_bit(EVENT_PENDING, &con->ucsi->flags);
ret = ucsi_acknowledge_connector_change(ucsi);
mutex_unlock(&ucsi->ppm_lock);
+
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
@@ -1237,7 +1247,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num)
struct ucsi_connector *con = &ucsi->connector[num - 1];
if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) {
- dev_dbg(ucsi->dev, "Bogus connector change event\n");
+ dev_dbg(ucsi->dev, "Early connector change event\n");
return;
}
@@ -1260,13 +1270,47 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
static int ucsi_reset_ppm(struct ucsi *ucsi)
{
- u64 command = UCSI_PPM_RESET;
+ u64 command;
unsigned long tmo;
u32 cci;
int ret;
mutex_lock(&ucsi->ppm_lock);
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret < 0)
+ goto out;
+
+ /*
+ * If UCSI_CCI_RESET_COMPLETE is already set we must clear
+ * the flag before we start another reset. Send a
+ * UCSI_SET_NOTIFICATION_ENABLE command to achieve this.
+ * Ignore a timeout and try the reset anyway if this fails.
+ */
+ if (cci & UCSI_CCI_RESET_COMPLETE) {
+ command = UCSI_SET_NOTIFICATION_ENABLE;
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
+ sizeof(command));
+ if (ret < 0)
+ goto out;
+
+ tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
+ do {
+ ret = ucsi->ops->read(ucsi, UCSI_CCI,
+ &cci, sizeof(cci));
+ if (ret < 0)
+ goto out;
+ if (cci & UCSI_CCI_COMMAND_COMPLETE)
+ break;
+ if (time_is_before_jiffies(tmo))
+ break;
+ msleep(20);
+ } while (1);
+
+ WARN_ON(cci & UCSI_CCI_RESET_COMPLETE);
+ }
+
+ command = UCSI_PPM_RESET;
ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
sizeof(command));
if (ret < 0)
@@ -1589,8 +1633,10 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
ucsi_register_partner(con);
ucsi_pwr_opmode_change(con);
ucsi_port_psy_changed(con);
- ucsi_get_partner_identity(con);
- ucsi_check_cable(con);
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
+ ucsi_get_partner_identity(con);
+ if (con->ucsi->cap.features & UCSI_CAP_CABLE_DETAILS)
+ ucsi_check_cable(con);
}
/* Only notify USB controller if partner supports USB data */
@@ -1636,6 +1682,7 @@ static int ucsi_init(struct ucsi *ucsi)
{
struct ucsi_connector *con, *connector;
u64 command, ntfy;
+ u32 cci;
int ret;
int i;
@@ -1688,6 +1735,15 @@ static int ucsi_init(struct ucsi *ucsi)
ucsi->connector = connector;
ucsi->ntfy = ntfy;
+
+ mutex_lock(&ucsi->ppm_lock);
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ mutex_unlock(&ucsi->ppm_lock);
+ if (ret)
+ return ret;
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
+
return 0;
err_unregister:
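The pre-reset sequence added above exists only to clear a stale RESET_COMPLETE flag: it issues UCSI_SET_NOTIFICATION_ENABLE and then polls CCI against a jiffies deadline. The deadline idiom is worth noting: time_is_before_jiffies(tmo) is wraparound-safe (it is built on time_after()) and reads as "tmo is in the past". A minimal sketch of the loop shape:

    unsigned long tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);

    do {
        ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
        if (ret < 0)
            return ret;
        if (cci & UCSI_CCI_COMMAND_COMPLETE)
            break;				/* done */
        if (time_is_before_jiffies(tmo))
            break;				/* deadline passed; proceed anyway */
        msleep(20);
    } while (1);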
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 32daf5f58650..0e7c92eb1b22 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -206,7 +206,7 @@ struct ucsi_capability {
#define UCSI_CAP_ATTR_POWER_OTHER BIT(10)
#define UCSI_CAP_ATTR_POWER_VBUS BIT(14)
u8 num_connectors;
- u8 features;
+ u16 features;
#define UCSI_CAP_SET_UOM BIT(0)
#define UCSI_CAP_SET_PDM BIT(1)
#define UCSI_CAP_ALT_MODE_DETAILS BIT(2)
@@ -215,7 +215,8 @@ struct ucsi_capability {
#define UCSI_CAP_CABLE_DETAILS BIT(5)
#define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS BIT(6)
#define UCSI_CAP_PD_RESET BIT(7)
- u16 reserved_1;
+#define UCSI_CAP_GET_PD_MESSAGE BIT(8)
+ u8 reserved_1;
u8 num_alt_modes;
u8 reserved_2;
u16 bc_version;
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 928eacbeb21a..7b3ac133ef86 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -23,10 +23,11 @@ struct ucsi_acpi {
void *base;
struct completion complete;
unsigned long flags;
+#define UCSI_ACPI_SUPPRESS_EVENT 0
+#define UCSI_ACPI_COMMAND_PENDING 1
+#define UCSI_ACPI_ACK_PENDING 2
guid_t guid;
u64 cmd;
- bool dell_quirk_probed;
- bool dell_quirk_active;
};
static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
@@ -79,9 +80,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
int ret;
if (ack)
- set_bit(ACK_PENDING, &ua->flags);
+ set_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
else
- set_bit(COMMAND_PENDING, &ua->flags);
+ set_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
if (ret)
@@ -92,9 +93,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
out_clear_bit:
if (ack)
- clear_bit(ACK_PENDING, &ua->flags);
+ clear_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
else
- clear_bit(COMMAND_PENDING, &ua->flags);
+ clear_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
return ret;
}
@@ -129,51 +130,40 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
};
/*
- * Some Dell laptops expect that an ACK command with the
- * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
- * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
- * If this is not done events are not delivered to OSPM and
- * subsequent commands will timeout.
+ * Some Dell laptops don't like ACK commands that have the
+ * UCSI_ACK_CONNECTOR_CHANGE bit set but not the
+ * UCSI_ACK_COMMAND_COMPLETE bit. To work around this, send a dummy
+ * command and bundle the UCSI_ACK_CONNECTOR_CHANGE with the
+ * UCSI_ACK_COMMAND_COMPLETE of the dummy command.
*/
static int
ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- u64 cmd = *(u64 *)val, ack = 0;
+ u64 cmd = *(u64 *)val;
+ u64 dummycmd = UCSI_GET_CAPABILITY;
int ret;
- if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
- cmd & UCSI_ACK_CONNECTOR_CHANGE)
- ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
-
- ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
- if (ret != 0)
- return ret;
- if (ack == 0)
- return ret;
-
- if (!ua->dell_quirk_probed) {
- ua->dell_quirk_probed = true;
-
- cmd = UCSI_GET_CAPABILITY;
- ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
- sizeof(cmd));
- if (ret == 0)
- return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
- &ack, sizeof(ack));
- if (ret != -ETIMEDOUT)
+ if (cmd == (UCSI_ACK_CC_CI | UCSI_ACK_CONNECTOR_CHANGE)) {
+ cmd |= UCSI_ACK_COMMAND_COMPLETE;
+
+ /*
+ * The UCSI core thinks it is sending a connector change ack
+ * and will accept new connector change events. We don't want
+ * this to happen for the dummy command as its response will
+ * still report the very event that the core is trying to clear.
+ */
+ set_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
+ ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &dummycmd,
+ sizeof(dummycmd));
+ clear_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
+
+ if (ret < 0)
return ret;
-
- ua->dell_quirk_active = true;
- dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
- dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
}
- if (!ua->dell_quirk_active)
- return ret;
-
- return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
+ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
}
static const struct ucsi_operations ucsi_dell_ops = {
@@ -209,13 +199,14 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
if (ret)
return;
- if (UCSI_CCI_CONNECTOR(cci))
+ if (UCSI_CCI_CONNECTOR(cci) &&
+ !test_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags))
ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))
complete(&ua->complete);
if (cci & UCSI_CCI_COMMAND_COMPLETE &&
- test_bit(COMMAND_PENDING, &ua->flags))
+ test_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags))
complete(&ua->complete);
}
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 932e7bf69447..ce08eb33e5be 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -255,6 +255,20 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
static void pmic_glink_ucsi_register(struct work_struct *work)
{
struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, register_work);
+ int orientation;
+ int i;
+
+ for (i = 0; i < PMIC_GLINK_MAX_PORTS; i++) {
+ if (!ucsi->port_orientation[i])
+ continue;
+ orientation = gpiod_get_value(ucsi->port_orientation[i]);
+
+ if (orientation >= 0) {
+ typec_switch_set(ucsi->port_switch[i],
+ orientation ? TYPEC_ORIENTATION_REVERSE
+ : TYPEC_ORIENTATION_NORMAL);
+ }
+ }
ucsi_register(ucsi->ucsi);
}
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index b246067e074b..6cb96a1e8b7d 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -967,7 +967,7 @@ vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features,
val_u32 = __virtio32_to_cpu(true, config->size_max);
- return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, val_u32);
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, val_u32);
}
/* fill the block size*/
@@ -1089,7 +1089,7 @@ static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features)
u8 ro;
ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 0 : 1;
- if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, ro))
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_READ_ONLY, ro))
return -EMSGSIZE;
return 0;
@@ -1100,7 +1100,7 @@ static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features)
u8 flush;
flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1;
- if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_FLUSH, flush))
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_FLUSH, flush))
return -EMSGSIZE;
return 0;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 045f666b4f12..8995730ce0bf 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2515,7 +2515,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
- vq_err(vq, "Guest moved used index from %u to %u",
+ vq_err(vq, "Guest moved avail index from %u to %u",
last_avail_idx, vq->avail_idx);
return -EFAULT;
}
@@ -2799,9 +2799,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
r = vhost_get_avail_idx(vq, &avail_idx);
if (unlikely(r))
return false;
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return false;
+ }
- return vq->avail_idx == vq->last_avail_idx;
+ return true;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
@@ -2838,9 +2848,19 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
&vq->avail->idx, r);
return false;
}
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return true;
+ }
- return vq->avail_idx != vq->last_avail_idx;
+ return false;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
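Both vhost hunks add the same missing ordering: the guest publishes ring entries and then updates avail_idx, so once the host observes a new avail_idx it must order its subsequent entry reads after that index read, hence the smp_rmb(). A generic sketch of the pairing; vhost itself goes through vhost16_to_cpu() and userspace accessors rather than raw loads:

    /* Producer (guest side), simplified: */
    ring[idx % num] = entry;		/* 1: publish the entry     */
    smp_wmb();				/* 2: entry before index    */
    WRITE_ONCE(*avail_idx, idx + 1);	/* 3: publish the new index */

    /* Consumer (host side), simplified: */
    u16 seen = READ_ONCE(*avail_idx);	/* A: read the index        */
    if (seen != last_avail_idx) {
        smp_rmb();			/* B: index before entries  */
        entry = ring[last_avail_idx % num];	/* C: safe to read  */
    }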
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index e3179e987cdb..197b6d5268e9 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -494,6 +494,7 @@ config FB_SBUS_HELPERS
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_FOPS
config FB_BW2
bool "BWtwo support"
@@ -514,6 +515,7 @@ config FB_CG6
depends on (FB = y) && (SPARC && FB_SBUS)
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_FOPS
help
This is the frame buffer device driver for the CGsix (GX, TurboGX)
frame buffer.
@@ -523,6 +525,7 @@ config FB_FFB
depends on FB_SBUS && SPARC64
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_FOPS
help
This is the frame buffer device driver for the Creator, Creator3D,
and Elite3D graphics boards.
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index db09fe87fcd4..0ab8848ba2f1 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -144,6 +144,12 @@ config FB_DMAMEM_HELPERS
select FB_SYS_IMAGEBLIT
select FB_SYSMEM_FOPS
+config FB_DMAMEM_HELPERS_DEFERRED
+ bool
+ depends on FB_CORE
+ select FB_DEFERRED_IO
+ select FB_DMAMEM_HELPERS
+
config FB_IOMEM_FOPS
tristate
depends on FB_CORE
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index dae96c9f61cf..806ecd32219b 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -196,7 +196,7 @@ err_mutex_unlock:
*/
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
- unsigned long offset = vmf->address - vmf->vma->vm_start;
+ unsigned long offset = vmf->pgoff << PAGE_SHIFT;
struct page *page = vmf->page;
file_update_time(vmf->vma->vm_file);
diff --git a/drivers/virt/vmgenid.c b/drivers/virt/vmgenid.c
index b67a28da4702..a1c467a0e9f7 100644
--- a/drivers/virt/vmgenid.c
+++ b/drivers/virt/vmgenid.c
@@ -68,7 +68,6 @@ out:
static void vmgenid_notify(struct acpi_device *device, u32 event)
{
struct vmgenid_state *state = acpi_driver_data(device);
- char *envp[] = { "NEW_VMGENID=1", NULL };
u8 old_id[VMGENID_SIZE];
memcpy(old_id, state->this_id, sizeof(old_id));
@@ -76,7 +75,6 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
if (!memcmp(old_id, state->this_id, sizeof(old_id)))
return;
add_vmfork_randomness(state->this_id, sizeof(state->this_id));
- kobject_uevent_env(&device->dev.kobj, KOBJ_CHANGE, envp);
}
static const struct acpi_device_id vmgenid_ids[] = {
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index f173587893cb..9510c551dce8 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -362,14 +362,16 @@ static const struct bus_type virtio_bus = {
.remove = virtio_dev_remove,
};
-int register_virtio_driver(struct virtio_driver *driver)
+int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
{
/* Catch this early. */
BUG_ON(driver->feature_table_size && !driver->feature_table);
driver->driver.bus = &virtio_bus;
+ driver->driver.owner = owner;
+
return driver_register(&driver->driver);
}
-EXPORT_SYMBOL_GPL(register_virtio_driver);
+EXPORT_SYMBOL_GPL(__register_virtio_driver);
void unregister_virtio_driver(struct virtio_driver *driver)
{
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index 29281b7c3887..0d6138bee2a3 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -49,9 +49,6 @@ static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
static inline void v9fs_fid_add_modes(struct p9_fid *fid, unsigned int s_flags,
unsigned int s_cache, unsigned int f_flags)
{
- if (fid->qid.type != P9_QTFILE)
- return;
-
if ((!s_cache) ||
((fid->qid.version == 0) && !(s_flags & V9FS_IGNORE_QV)) ||
(s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) {
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 9defa12208f9..1775fcc7f0e8 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -179,13 +179,14 @@ extern int v9fs_vfs_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
-extern struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid);
+extern struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid,
+ bool new);
extern const struct inode_operations v9fs_dir_inode_operations_dotl;
extern const struct inode_operations v9fs_file_inode_operations_dotl;
extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
extern const struct netfs_request_ops v9fs_req_ops;
extern struct inode *v9fs_fid_iget_dotl(struct super_block *sb,
- struct p9_fid *fid);
+ struct p9_fid *fid, bool new);
/* other default globals */
#define V9FS_PORT 564
@@ -224,12 +225,12 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
*/
static inline struct inode *
v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
- struct super_block *sb)
+ struct super_block *sb, bool new)
{
if (v9fs_proto_dotl(v9ses))
- return v9fs_fid_iget_dotl(sb, fid);
+ return v9fs_fid_iget_dotl(sb, fid, new);
else
- return v9fs_fid_iget(sb, fid);
+ return v9fs_fid_iget(sb, fid, new);
}
#endif
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index abdbbaee5184..348cc90bf9c5 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -520,6 +520,7 @@ const struct file_operations v9fs_file_operations = {
.splice_read = v9fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync,
+ .setlease = simple_nosetlease,
};
const struct file_operations v9fs_file_operations_dotl = {
@@ -534,4 +535,5 @@ const struct file_operations v9fs_file_operations_dotl = {
.splice_read = v9fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync_dotl,
+ .setlease = simple_nosetlease,
};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 360a5304ec03..7a3308d77606 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -83,7 +83,7 @@ static int p9mode2perm(struct v9fs_session_info *v9ses,
int res;
int mode = stat->mode;
- res = mode & S_IALLUGO;
+ res = mode & 0777; /* S_IRWXUGO */
if (v9fs_proto_dotu(v9ses)) {
if ((mode & P9_DMSETUID) == P9_DMSETUID)
res |= S_ISUID;
@@ -178,6 +178,9 @@ int v9fs_uflags2omode(int uflags, int extended)
break;
}
+ if (uflags & O_TRUNC)
+ ret |= P9_OTRUNC;
+
if (extended) {
if (uflags & O_EXCL)
ret |= P9_OEXCL;
@@ -344,20 +347,25 @@ void v9fs_evict_inode(struct inode *inode)
struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
__le32 __maybe_unused version;
- truncate_inode_pages_final(&inode->i_data);
+ if (!is_bad_inode(inode)) {
+ truncate_inode_pages_final(&inode->i_data);
- version = cpu_to_le32(v9inode->qid.version);
- netfs_clear_inode_writeback(inode, &version);
+ version = cpu_to_le32(v9inode->qid.version);
+ netfs_clear_inode_writeback(inode, &version);
- clear_inode(inode);
- filemap_fdatawrite(&inode->i_data);
+ clear_inode(inode);
+ filemap_fdatawrite(&inode->i_data);
#ifdef CONFIG_9P_FSCACHE
- fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
+ if (v9fs_inode_cookie(v9inode))
+ fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
#endif
+ } else
+ clear_inode(inode);
}
-struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
+struct inode *
+v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid, bool new)
{
dev_t rdev;
int retval;
@@ -369,8 +377,18 @@ struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
inode = iget_locked(sb, QID2INO(&fid->qid));
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
+ if (!(inode->i_state & I_NEW)) {
+ if (!new) {
+ goto done;
+ } else {
+ p9_debug(P9_DEBUG_VFS, "WARNING: Inode collision %ld\n",
+ inode->i_ino);
+ iput(inode);
+ remove_inode_hash(inode);
+ inode = iget_locked(sb, QID2INO(&fid->qid));
+ WARN_ON(!(inode->i_state & I_NEW));
+ }
+ }
/*
* initialize the inode with the stat info
@@ -394,11 +412,11 @@ struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
unlock_new_inode(inode);
+done:
return inode;
error:
iget_failed(inode);
return ERR_PTR(retval);
-
}
/**
@@ -430,8 +448,15 @@ static int v9fs_at_to_dotl_flags(int flags)
*/
static void v9fs_dec_count(struct inode *inode)
{
- if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
- drop_nlink(inode);
+ if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) {
+ if (inode->i_nlink) {
+ drop_nlink(inode);
+ } else {
+ p9_debug(P9_DEBUG_VFS,
+ "WARNING: unexpected i_nlink zero %d inode %ld\n",
+ inode->i_nlink, inode->i_ino);
+ }
+ }
}
/**
@@ -482,6 +507,9 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
} else
v9fs_dec_count(inode);
+ if (inode->i_nlink <= 0) /* no more refs: unhash it */
+ remove_inode_hash(inode);
+
v9fs_invalidate_inode_attr(inode);
v9fs_invalidate_inode_attr(dir);
@@ -547,7 +575,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
/*
* instantiate inode and assign the unopened fid to the dentry
*/
- inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS,
@@ -676,7 +704,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
else if (IS_ERR(fid))
inode = ERR_CAST(fid);
else
- inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb, false);
/*
* If we had a rename on the server and a parallel lookup
* for the new name, then make sure we instantiate with
@@ -1057,8 +1085,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
struct v9fs_session_info *v9ses = sb->s_fs_info;
struct v9fs_inode *v9inode = V9FS_I(inode);
- set_nlink(inode, 1);
-
inode_set_atime(inode, stat->atime, 0);
inode_set_mtime(inode, stat->mtime, 0);
inode_set_ctime(inode, stat->mtime, 0);
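
Both `v9fs_fid_iget()` variants now recover from an inode-number collision instead of returning a stale inode on creation paths: if `iget_locked()` finds an already-hashed inode but the caller asserted the object is new, the old inode is dropped and unhashed, and the lookup is retried so the second `iget_locked()` must allocate a fresh `I_NEW` inode. The loop, distilled from the hunk above with explanatory comments (kernel context assumed, not standalone):

    inode = iget_locked(sb, QID2INO(&fid->qid));
    if (!(inode->i_state & I_NEW)) {
            if (!new)
                    goto done;  /* lookup path: a cached inode is fine */
            /* Creation path: the server reused an inode number. Drop
             * our reference and unhash the stale inode so the retry
             * below cannot find it again and must allocate afresh. */
            iput(inode);
            remove_inode_hash(inode);
            inode = iget_locked(sb, QID2INO(&fid->qid));
            WARN_ON(!(inode->i_state & I_NEW));
    }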
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index ef9db3e03506..c61b97bd13b9 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -52,7 +52,10 @@ static kgid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
return current_fsgid();
}
-struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
+
+
+struct inode *
+v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid, bool new)
{
int retval;
struct inode *inode;
@@ -62,8 +65,18 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
inode = iget_locked(sb, QID2INO(&fid->qid));
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
+ if (!(inode->i_state & I_NEW)) {
+ if (!new) {
+ goto done;
+ } else { /* deal with race condition in inode number reuse */
+ p9_debug(P9_DEBUG_ERROR, "WARNING: Inode collision %lx\n",
+ inode->i_ino);
+ iput(inode);
+ remove_inode_hash(inode);
+ inode = iget_locked(sb, QID2INO(&fid->qid));
+ WARN_ON(!(inode->i_state & I_NEW));
+ }
+ }
/*
* initialize the inode with the stat info
@@ -78,11 +91,11 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
retval = v9fs_init_inode(v9ses, inode, &fid->qid,
st->st_mode, new_decode_dev(st->st_rdev));
+ v9fs_stat2inode_dotl(st, inode, 0);
kfree(st);
if (retval)
goto error;
- v9fs_stat2inode_dotl(st, inode, 0);
v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
retval = v9fs_get_acl(inode, fid);
@@ -90,12 +103,11 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
goto error;
unlock_new_inode(inode);
-
+done:
return inode;
error:
iget_failed(inode);
return ERR_PTR(retval);
-
}
struct dotl_openflag_map {
@@ -247,7 +259,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
goto out;
}
- inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+ inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
@@ -297,7 +309,6 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
umode_t omode)
{
int err;
- struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
kgid_t gid;
const unsigned char *name;
@@ -307,7 +318,6 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
- v9ses = v9fs_inode2v9ses(dir);
omode |= S_IFDIR;
if (dir->i_mode & S_ISGID)
@@ -342,7 +352,7 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
}
/* instantiate inode and assign the unopened fid to the dentry */
- inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+ inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
@@ -739,7 +749,6 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
kgid_t gid;
const unsigned char *name;
umode_t mode;
- struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
struct p9_qid qid;
@@ -749,7 +758,6 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
dir->i_ino, dentry, omode,
MAJOR(rdev), MINOR(rdev));
- v9ses = v9fs_inode2v9ses(dir);
dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
@@ -780,7 +788,7 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
err);
goto error;
}
- inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+ inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 4236058c7bbd..f52fdf42945c 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -139,7 +139,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
else
sb->s_d_op = &v9fs_dentry_operations;
- inode = v9fs_get_inode_from_fid(v9ses, fid, sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, sb, true);
if (IS_ERR(inode)) {
retval = PTR_ERR(inode);
goto release_sb;
@@ -244,6 +244,21 @@ done:
return res;
}
+static int v9fs_drop_inode(struct inode *inode)
+{
+ struct v9fs_session_info *v9ses;
+
+ v9ses = v9fs_inode2v9ses(inode);
+ if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
+ return generic_drop_inode(inode);
+ /*
+ * In non-cached mode, always drop the inode: we want the inode
+ * attributes to always match those on the server.
+ */
+ return 1;
+}
+
static int v9fs_write_inode(struct inode *inode,
struct writeback_control *wbc)
{
@@ -268,6 +283,7 @@ static const struct super_operations v9fs_super_ops = {
.alloc_inode = v9fs_alloc_inode,
.free_inode = v9fs_free_inode,
.statfs = simple_statfs,
+ .drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
.show_options = v9fs_show_options,
.umount_begin = v9fs_umount_begin,
@@ -278,6 +294,7 @@ static const struct super_operations v9fs_super_ops_dotl = {
.alloc_inode = v9fs_alloc_inode,
.free_inode = v9fs_free_inode,
.statfs = v9fs_statfs,
+ .drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
.show_options = v9fs_show_options,
.umount_begin = v9fs_umount_begin,
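
`->drop_inode` is consulted when the last reference to an inode is put; a nonzero return tells the VFS to evict immediately instead of keeping the inode cached. For mounts without metadata caching, eviction forces the next access to refetch attributes from the server, which is exactly what the comment in the hunk asks for. A standalone restatement of the policy (the generic branch paraphrases `generic_drop_inode()`, which roughly drops when the inode is unhashed or has no links left):

    #include <stdbool.h>
    #include <stdio.h>

    static int drop_inode(bool cache_meta, int nlink, bool hashed)
    {
        if (cache_meta)
            return !nlink || !hashed;  /* generic_drop_inode(), roughly */
        return 1;                       /* uncached mount: always drop */
    }

    int main(void)
    {
        printf("cached mount, live file : %d\n", drop_inode(true, 1, true));
        printf("cached mount, unlinked  : %d\n", drop_inode(true, 0, true));
        printf("uncached mount, any file: %d\n", drop_inode(false, 1, true));
        return 0;
    }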
diff --git a/fs/aio.c b/fs/aio.c
index 9cdaa2faa536..0f4f531c9780 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1202,8 +1202,8 @@ static void aio_complete(struct aio_kiocb *iocb)
spin_lock_irqsave(&ctx->wait.lock, flags);
list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
if (avail >= curr->min_nr) {
- list_del_init_careful(&curr->w.entry);
wake_up_process(curr->w.private);
+ list_del_init_careful(&curr->w.entry);
}
spin_unlock_irqrestore(&ctx->wait.lock, flags);
}
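
The one-line swap in `aio_complete()` reads like a use-after-free fix: `list_del_init_careful()` is effectively the release point for the wait entry, and the sleeping waiter (whose entry typically lives on its stack) may observe the deletion locklessly, return, and free the memory, after which `wake_up_process(curr->w.private)` would touch a dead object. Waking first is safe because the entry is still linked and the waiter must take `ctx->wait.lock`, held here, before tearing it down. The fixed hunk, annotated (comments added for this note):

    spin_lock_irqsave(&ctx->wait.lock, flags);
    list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
            if (avail >= curr->min_nr) {
                    /* Wake while the entry is still linked: the waiter
                     * must take ctx->wait.lock (held here) before it
                     * can unlink and free its on-stack entry. */
                    wake_up_process(curr->w.private);
                    /* Publish the removal only afterwards; past this
                     * point the waiter's lockless emptiness check can
                     * succeed and 'curr' may be freed. */
                    list_del_init_careful(&curr->w.entry);
            }
    spin_unlock_irqrestore(&ctx->wait.lock, flags);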
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index b02796c8a595..66ca0bbee639 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -17,6 +17,7 @@ bcachefs-y := \
btree_journal_iter.o \
btree_key_cache.o \
btree_locking.o \
+ btree_node_scan.o \
btree_trans_commit.o \
btree_update.o \
btree_update_interior.o \
@@ -37,6 +38,7 @@ bcachefs-y := \
error.o \
extents.o \
extent_update.o \
+ eytzinger.o \
fs.o \
fs-common.o \
fs-ioctl.o \
@@ -67,6 +69,7 @@ bcachefs-y := \
quota.o \
rebalance.o \
recovery.o \
+ recovery_passes.o \
reflink.o \
replicas.o \
sb-clean.o \
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 3640f417cce1..5c180fdc3efb 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -281,7 +281,6 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter = { NULL };
- struct bkey_s_c_xattr xattr;
struct posix_acl *acl = NULL;
struct bkey_s_c k;
int ret;
@@ -290,28 +289,27 @@ retry:
ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
&hash, inode_inum(inode), &search, 0);
- if (ret) {
- if (!bch2_err_matches(ret, ENOENT))
- acl = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err;
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
- if (ret) {
- acl = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err;
- xattr = bkey_s_c_to_xattr(k);
+ struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
- le16_to_cpu(xattr.v->x_val_len));
+ le16_to_cpu(xattr.v->x_val_len));
+ ret = PTR_ERR_OR_ZERO(acl);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- if (!IS_ERR(acl))
+ if (ret)
+ acl = !bch2_err_matches(ret, ENOENT) ? ERR_PTR(ret) : NULL;
+
+ if (!IS_ERR_OR_NULL(acl))
set_cached_acl(&inode->v, type, acl);
-out:
- if (bch2_err_matches(PTR_ERR_OR_ZERO(acl), BCH_ERR_transaction_restart))
- goto retry;
bch2_trans_iter_exit(trans, &iter);
bch2_trans_put(trans);
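
The `bch2_get_acl()` rewrite funnels every failure through one `err:` label so the transaction-restart check covers the whole sequence, not just the final ACL decode; ENOENT is mapped to a NULL ACL rather than an error. This retry-from-the-top idiom is the standard shape for bcachefs btree transactions. A self-contained miniature (GNU `?:` chaining as in the kernel; the step functions are illustrative stand-ins, not bcachefs APIs):

    #include <stdio.h>

    enum { OK = 0, RESTART = -1, NOENT = -2 };

    static int attempts;
    static int step1(void) { return ++attempts < 2 ? RESTART : OK; }
    static int step2(void) { return OK; }

    int main(void)
    {
        int ret;
    retry:
        ret = step1() ?: step2();   /* first failure wins, like the chain */
        if (ret == RESTART)
            goto retry;             /* rewind the whole transaction */
        printf("ret=%d after %d attempts\n", ret, attempts);
        return ret == NOENT ? 0 : ret;  /* NOENT means "no ACL", not error */
    }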
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 893e38f9db80..4ff56fa4d539 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -1713,34 +1713,37 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
if (ret)
goto out;
- if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
- a->v.gen++;
- SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
- goto write;
- }
-
- if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
- if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
- bch2_trans_inconsistent(trans,
- "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
- "%s",
- a->v.journal_seq,
- c->journal.flushed_seq_ondisk,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ if (a->v.dirty_sectors) {
+ if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
+ trans, "attempting to discard bucket with dirty data\n%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = -EIO;
- }
goto out;
}
if (a->v.data_type != BCH_DATA_need_discard) {
- if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
- bch2_trans_inconsistent(trans,
- "bucket incorrectly set in need_discard btree\n"
- "%s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- ret = -EIO;
+ if (data_type_is_empty(a->v.data_type) &&
+ BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
+ a->v.gen++;
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
+ goto write;
}
+ if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
+ trans, "bucket incorrectly set in need_discard btree\n"
+ "%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ ret = -EIO;
+ goto out;
+ }
+
+ if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
+ if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
+ trans, "clearing need_discard but journal_seq %llu > flushed_seq %llu\n%s",
+ a->v.journal_seq,
+ c->journal.flushed_seq_ondisk,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ ret = -EIO;
goto out;
}
@@ -1835,6 +1838,7 @@ static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpo
if (ret)
goto err;
+ BUG_ON(a->v.dirty_sectors);
SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
a->v.data_type = alloc_data_type(a->v, a->v.data_type);
@@ -1942,6 +1946,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
goto out;
BUG_ON(a->v.data_type != BCH_DATA_cached);
+ BUG_ON(a->v.dirty_sectors);
if (!a->v.cached_sectors)
bch_err(c, "invalidating empty bucket, confused");
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 214b15c84d1f..a1fc30adf912 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -188,8 +188,10 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
{
switch (watermark) {
- case BCH_WATERMARK_reclaim:
+ case BCH_WATERMARK_interior_updates:
return 0;
+ case BCH_WATERMARK_reclaim:
+ return OPEN_BUCKETS_COUNT / 6;
case BCH_WATERMARK_btree:
case BCH_WATERMARK_btree_copygc:
return OPEN_BUCKETS_COUNT / 4;
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
index b91b7a461056..c2226e947c41 100644
--- a/fs/bcachefs/alloc_types.h
+++ b/fs/bcachefs/alloc_types.h
@@ -22,7 +22,8 @@ struct bucket_alloc_state {
x(copygc) \
x(btree) \
x(btree_copygc) \
- x(reclaim)
+ x(reclaim) \
+ x(interior_updates)
enum bch_watermark {
#define x(name) BCH_WATERMARK_##name,
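
Watermarks are declared once in an x-macro list and expanded into the enum directly below, so appending `interior_updates` extends the enum (and any other expansion site, such as a name table) in declaration order; the order matters because `open_buckets_reserved()` above grants the new, last-listed watermark the deepest reserve. The pattern in miniature (standalone; entries above `copygc` are elided here):

    #include <stdio.h>

    #define BCH_WATERMARKS() \
        x(copygc)            \
        x(btree)             \
        x(btree_copygc)      \
        x(reclaim)           \
        x(interior_updates)

    enum bch_watermark {
    #define x(name) BCH_WATERMARK_##name,
        BCH_WATERMARKS()
    #undef x
        BCH_WATERMARK_NR
    };

    static const char * const names[] = {
    #define x(name) #name,
        BCH_WATERMARKS()
    #undef x
    };

    int main(void)
    {
        for (int i = 0; i < BCH_WATERMARK_NR; i++)
            printf("%d %s\n", i, names[i]);
        return 0;
    }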
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 8cb35ea572cb..a20044201002 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -8,6 +8,7 @@
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
+#include "checksum.h"
#include "error.h"
#include <linux/mm.h>
@@ -29,8 +30,7 @@ static bool extent_matches_bp(struct bch_fs *c,
if (p.ptr.cached)
continue;
- bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
- &bucket2, &bp2);
+ bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bucket2, &bp2);
if (bpos_eq(bucket, bucket2) &&
!memcmp(&bp, &bp2, sizeof(bp)))
return true;
@@ -44,13 +44,20 @@ int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
struct printbuf *err)
{
struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
+
+ /* these will be caught by fsck */
+ if (!bch2_dev_exists2(c, bp.k->p.inode))
+ return 0;
+
+ struct bch_dev *ca = bch_dev_bkey_exists(c, bp.k->p.inode);
struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
int ret = 0;
- bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
+ bkey_fsck_err_on((bp.v->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) >= ca->mi.bucket_size ||
+ !bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
c, err,
- backpointer_pos_wrong,
- "backpointer at wrong pos");
+ backpointer_bucket_offset_wrong,
+ "backpointer bucket_offset wrong");
fsck_err:
return ret;
}
@@ -378,7 +385,7 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
backpointer_to_missing_alloc,
"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
alloc_iter.pos.inode, alloc_iter.pos.offset,
- (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, bp_iter, 0);
goto out;
}
@@ -414,6 +421,84 @@ struct extents_to_bp_state {
struct bkey_buf last_flushed;
};
+static int drop_dev_and_update(struct btree_trans *trans, enum btree_id btree,
+ struct bkey_s_c extent, unsigned dev)
+{
+ struct bkey_i *n = bch2_bkey_make_mut_noupdate(trans, extent);
+ int ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ return ret;
+
+ bch2_bkey_drop_device(bkey_i_to_s(n), dev);
+ return bch2_btree_insert_trans(trans, btree, n, 0);
+}
+
+static int check_extent_checksum(struct btree_trans *trans,
+ enum btree_id btree, struct bkey_s_c extent,
+ enum btree_id o_btree, struct bkey_s_c extent2, unsigned dev)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(extent);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ struct printbuf buf = PRINTBUF;
+ void *data_buf = NULL;
+ struct bio *bio = NULL;
+ size_t bytes;
+ int ret = 0;
+
+ if (bkey_is_btree_ptr(extent.k))
+ return false;
+
+ bkey_for_each_ptr_decode(extent.k, ptrs, p, entry)
+ if (p.ptr.dev == dev)
+ goto found;
+ BUG();
+found:
+ if (!p.crc.csum_type)
+ return false;
+
+ bytes = p.crc.compressed_size << 9;
+
+ struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
+ if (!bch2_dev_get_ioref(ca, READ))
+ return false;
+
+ data_buf = kvmalloc(bytes, GFP_KERNEL);
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ bio = bio_alloc(ca->disk_sb.bdev, buf_pages(data_buf, bytes), REQ_OP_READ, GFP_KERNEL);
+ bio->bi_iter.bi_sector = p.ptr.offset;
+ bch2_bio_map(bio, data_buf, bytes);
+ ret = submit_bio_wait(bio);
+ if (ret)
+ goto err;
+
+ prt_str(&buf, "extents pointing to same space, but first extent checksum bad:");
+ prt_printf(&buf, "\n %s ", bch2_btree_id_str(btree));
+ bch2_bkey_val_to_text(&buf, c, extent);
+ prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree));
+ bch2_bkey_val_to_text(&buf, c, extent2);
+
+ struct nonce nonce = extent_nonce(extent.k->version, p.crc);
+ struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes);
+ if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum),
+ c, dup_backpointer_to_bad_csum_extent,
+ "%s", buf.buf))
+ ret = drop_dev_and_update(trans, btree, extent, dev) ?: 1;
+fsck_err:
+err:
+ if (bio)
+ bio_put(bio);
+ kvfree(data_buf);
+ percpu_ref_put(&ca->io_ref);
+ printbuf_exit(&buf);
+ return ret;
+}
+
static int check_bp_exists(struct btree_trans *trans,
struct extents_to_bp_state *s,
struct bpos bucket,
@@ -421,7 +506,8 @@ static int check_bp_exists(struct btree_trans *trans,
struct bkey_s_c orig_k)
{
struct bch_fs *c = trans->c;
- struct btree_iter bp_iter = { NULL };
+ struct btree_iter bp_iter = {};
+ struct btree_iter other_extent_iter = {};
struct printbuf buf = PRINTBUF;
struct bkey_s_c bp_k;
struct bkey_buf tmp;
@@ -429,13 +515,19 @@ static int check_bp_exists(struct btree_trans *trans,
bch2_bkey_buf_init(&tmp);
+ if (!bch2_dev_bucket_exists(c, bucket)) {
+ prt_str(&buf, "extent for nonexistent device:bucket ");
+ bch2_bpos_to_text(&buf, bucket);
+ prt_str(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, orig_k);
+ bch_err(c, "%s", buf.buf);
+ return -BCH_ERR_fsck_repair_unimplemented;
+ }
+
if (bpos_lt(bucket, s->bucket_start) ||
bpos_gt(bucket, s->bucket_end))
return 0;
- if (!bch2_dev_bucket_exists(c, bucket))
- goto missing;
-
bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
bucket_pos_to_bp(c, bucket, bp.bucket_offset),
0);
@@ -461,21 +553,94 @@ static int check_bp_exists(struct btree_trans *trans,
ret = -BCH_ERR_transaction_restart_write_buffer_flush;
goto out;
}
- goto missing;
+
+ goto check_existing_bp;
}
out:
err:
fsck_err:
+ bch2_trans_iter_exit(trans, &other_extent_iter);
bch2_trans_iter_exit(trans, &bp_iter);
bch2_bkey_buf_exit(&tmp, c);
printbuf_exit(&buf);
return ret;
+check_existing_bp:
+ /* Do we have a backpointer for a different extent? */
+ if (bp_k.k->type != KEY_TYPE_backpointer)
+ goto missing;
+
+ struct bch_backpointer other_bp = *bkey_s_c_to_backpointer(bp_k).v;
+
+ struct bkey_s_c other_extent =
+ bch2_backpointer_get_key(trans, &other_extent_iter, bp_k.k->p, other_bp, 0);
+ ret = bkey_err(other_extent);
+ if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
+ ret = 0;
+ if (ret)
+ goto err;
+
+ if (!other_extent.k)
+ goto missing;
+
+ if (bch2_extents_match(orig_k, other_extent)) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "duplicate versions of same extent, deleting smaller\n ");
+ bch2_bkey_val_to_text(&buf, c, orig_k);
+ prt_str(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, other_extent);
+ bch_err(c, "%s", buf.buf);
+
+ if (other_extent.k->size <= orig_k.k->size) {
+ ret = drop_dev_and_update(trans, other_bp.btree_id, other_extent, bucket.inode);
+ if (ret)
+ goto err;
+ goto out;
+ } else {
+ ret = drop_dev_and_update(trans, bp.btree_id, orig_k, bucket.inode);
+ if (ret)
+ goto err;
+ goto missing;
+ }
+ }
+
+ ret = check_extent_checksum(trans, other_bp.btree_id, other_extent, bp.btree_id, orig_k, bucket.inode);
+ if (ret < 0)
+ goto err;
+ if (ret) {
+ ret = 0;
+ goto missing;
+ }
+
+ ret = check_extent_checksum(trans, bp.btree_id, orig_k, other_bp.btree_id, other_extent, bucket.inode);
+ if (ret < 0)
+ goto err;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ printbuf_reset(&buf);
+ prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bucket.inode);
+ bch2_bkey_val_to_text(&buf, c, orig_k);
+ prt_str(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, other_extent);
+ bch_err(c, "%s", buf.buf);
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ goto err;
missing:
+ printbuf_reset(&buf);
prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
bch2_btree_id_str(bp.btree_id), bp.level);
bch2_bkey_val_to_text(&buf, c, orig_k);
- prt_printf(&buf, "\nbp pos ");
- bch2_bpos_to_text(&buf, bp_iter.pos);
+ prt_printf(&buf, "\n got: ");
+ bch2_bkey_val_to_text(&buf, c, bp_k);
+
+ struct bkey_i_backpointer n_bp_k;
+ bkey_backpointer_init(&n_bp_k.k_i);
+ n_bp_k.k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
+ n_bp_k.v = bp;
+ prt_printf(&buf, "\n want: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&n_bp_k.k_i));
if (fsck_err(c, ptr_to_missing_backpointer, "%s", buf.buf))
ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);
@@ -502,8 +667,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
if (p.ptr.cached)
continue;
- bch2_extent_ptr_to_bp(c, btree, level,
- k, p, &bucket_pos, &bp);
+ bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bucket_pos, &bp);
ret = check_bp_exists(trans, s, bucket_pos, bp, k);
if (ret)
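
When two extents claim the same space on a device, `check_extent_checksum()` breaks the tie empirically: it reads the data back, recomputes the checksum with the extent's recorded type and nonce, and drops the device pointer from whichever extent no longer matches its own checksum. The decision core, restated standalone with a stand-in hash (the kernel uses `bch2_checksum()` with the extent's csum type):

    #include <stdio.h>

    static unsigned csum(const char *data, unsigned len)
    {
        unsigned h = 5381;      /* toy hash, illustration only */
        while (len--)
            h = h * 33 + (unsigned char)*data++;
        return h;
    }

    int main(void)
    {
        const char on_disk[] = "current contents";
        unsigned recorded_a = csum("current contents", 16);  /* matches   */
        unsigned recorded_b = csum("older contents!!", 16);  /* stale key */

        printf("extent A stale? %d\n", csum(on_disk, 16) != recorded_a);
        printf("extent B stale? %d\n", csum(on_disk, 16) != recorded_b);
        return 0;
    }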
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index 327365a9feac..85949b9fd880 100644
--- a/fs/bcachefs/backpointers.h
+++ b/fs/bcachefs/backpointers.h
@@ -53,14 +53,11 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
u64 bucket_offset)
{
struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
- struct bpos ret;
-
- ret = POS(bucket.inode,
- (bucket_to_sector(ca, bucket.offset) <<
- MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
+ struct bpos ret = POS(bucket.inode,
+ (bucket_to_sector(ca, bucket.offset) <<
+ MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
-
return ret;
}
@@ -90,20 +87,40 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
}
-static inline enum bch_data_type bkey_ptr_data_type(enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, struct extent_ptr_decoded p)
+static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
+ struct extent_ptr_decoded p,
+ const union bch_extent_entry *entry)
{
- return level ? BCH_DATA_btree :
- p.has_ec ? BCH_DATA_stripe :
- BCH_DATA_user;
+ switch (k.k->type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_btree_ptr_v2:
+ return BCH_DATA_btree;
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v:
+ return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
+ case KEY_TYPE_stripe: {
+ const struct bch_extent_ptr *ptr = &entry->ptr;
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+
+ BUG_ON(ptr < s.v->ptrs ||
+ ptr >= s.v->ptrs + s.v->nr_blocks);
+
+ return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
+ ? BCH_DATA_parity
+ : BCH_DATA_user;
+ }
+ default:
+ BUG();
+ }
}
static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
+ const union bch_extent_entry *entry,
struct bpos *bucket_pos, struct bch_backpointer *bp)
{
- enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
s64 sectors = level ? btree_sectors(c) : k.k->size;
u32 bucket_offset;
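
`bch2_bkey_ptr_data_type()` now classifies a pointer by its key type rather than by btree level, and for stripe keys by position: the last `nr_redundant` pointers of a stripe are parity blocks. The index arithmetic, restated as a standalone check (plain indices instead of the kernel's pointer arithmetic):

    #include <stdio.h>

    static const char *stripe_ptr_class(unsigned idx, unsigned nr_blocks,
                                        unsigned nr_redundant)
    {
        /* Mirrors the check in bch2_bkey_ptr_data_type(): the final
         * nr_redundant pointers hold parity, the rest user data. */
        return idx >= nr_blocks - nr_redundant ? "parity" : "user";
    }

    int main(void)
    {
        unsigned nr_blocks = 6, nr_redundant = 2;  /* e.g. 4+2 erasure coding */

        for (unsigned i = 0; i < nr_blocks; i++)
            printf("ptr %u: %s\n", i,
                   stripe_ptr_class(i, nr_blocks, nr_redundant));
        return 0;
    }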
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 799aa32b6b4d..91c3c1fef233 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -209,7 +209,7 @@
#include "fifo.h"
#include "nocow_locking_types.h"
#include "opts.h"
-#include "recovery_types.h"
+#include "recovery_passes_types.h"
#include "sb-errors_types.h"
#include "seqmutex.h"
#include "time_stats.h"
@@ -456,6 +456,7 @@ enum bch_time_stats {
#include "alloc_types.h"
#include "btree_types.h"
+#include "btree_node_scan_types.h"
#include "btree_write_buffer_types.h"
#include "buckets_types.h"
#include "buckets_waiting_for_journal_types.h"
@@ -614,6 +615,7 @@ struct bch_dev {
*/
#define BCH_FS_FLAGS() \
+ x(new_fs) \
x(started) \
x(may_go_rw) \
x(rw) \
@@ -707,6 +709,8 @@ struct btree_trans_buf {
x(stripe_delete) \
x(reflink) \
x(fallocate) \
+ x(fsync) \
+ x(dio_write) \
x(discard) \
x(discard_fast) \
x(invalidate) \
@@ -796,6 +800,7 @@ struct bch_fs {
u64 features;
u64 compat;
unsigned long errors_silent[BITS_TO_LONGS(BCH_SB_ERR_MAX)];
+ u64 btrees_lost_data;
} sb;
@@ -810,7 +815,6 @@ struct bch_fs {
/* snapshot.c: */
struct snapshot_table __rcu *snapshots;
- size_t snapshot_table_size;
struct mutex snapshot_table_lock;
struct rw_semaphore snapshot_create_lock;
@@ -1104,6 +1108,8 @@ struct bch_fs {
struct journal_keys journal_keys;
struct list_head journal_iters;
+ struct find_btree_nodes found_btree_nodes;
+
u64 last_bucket_seq_cleanup;
u64 counters_on_mount[BCH_COUNTER_NR];
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index bff8750ac0d7..f7fbfccd2b1e 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -578,7 +578,8 @@ struct bch_member {
__le64 nbuckets; /* device size */
__le16 first_bucket; /* index of first bucket used */
__le16 bucket_size; /* sectors */
- __le32 pad;
+ __u8 btree_bitmap_shift;
+ __u8 pad[3];
__le64 last_mount; /* time_t */
__le64 flags;
@@ -587,6 +588,7 @@ struct bch_member {
__le64 errors_at_reset[BCH_MEMBER_ERROR_NR];
__le64 errors_reset_time;
__le64 seq;
+ __le64 btree_allocated_bitmap;
};
#define BCH_MEMBER_V1_BYTES 56
@@ -818,6 +820,7 @@ struct bch_sb_field_ext {
struct bch_sb_field field;
__le64 recovery_passes_required[2];
__le64 errors_silent[8];
+ __le64 btrees_lost_data;
};
struct bch_sb_field_downgrade_entry {
@@ -875,7 +878,8 @@ struct bch_sb_field_downgrade {
x(rebalance_work, BCH_VERSION(1, 3)) \
x(member_seq, BCH_VERSION(1, 4)) \
x(subvolume_fs_parent, BCH_VERSION(1, 5)) \
- x(btree_subvolume_children, BCH_VERSION(1, 6))
+ x(btree_subvolume_children, BCH_VERSION(1, 6)) \
+ x(mi_btree_bitmap, BCH_VERSION(1, 7))
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
@@ -1313,7 +1317,7 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
x(write_buffer_keys, 11) \
x(datetime, 12)
-enum {
+enum bch_jset_entry_type {
#define x(f, nr) BCH_JSET_ENTRY_##f = nr,
BCH_JSET_ENTRY_TYPES()
#undef x
@@ -1359,7 +1363,7 @@ struct jset_entry_blacklist_v2 {
x(inodes, 1) \
x(key_version, 2)
-enum {
+enum bch_fs_usage_type {
#define x(f, nr) BCH_FS_USAGE_##f = nr,
BCH_FS_USAGE_TYPES()
#undef x
@@ -1500,7 +1504,8 @@ enum btree_id_flags {
BIT_ULL(KEY_TYPE_stripe)) \
x(reflink, 7, BTREE_ID_EXTENTS|BTREE_ID_DATA, \
BIT_ULL(KEY_TYPE_reflink_v)| \
- BIT_ULL(KEY_TYPE_indirect_inline_data)) \
+ BIT_ULL(KEY_TYPE_indirect_inline_data)| \
+ BIT_ULL(KEY_TYPE_error)) \
x(subvolumes, 8, 0, \
BIT_ULL(KEY_TYPE_subvolume)) \
x(snapshots, 9, 0, \
@@ -1534,6 +1539,20 @@ enum btree_id {
BTREE_ID_NR
};
+static inline bool btree_id_is_alloc(enum btree_id id)
+{
+ switch (id) {
+ case BTREE_ID_alloc:
+ case BTREE_ID_backpointers:
+ case BTREE_ID_need_discard:
+ case BTREE_ID_freespace:
+ case BTREE_ID_bucket_gens:
+ return true;
+ default:
+ return false;
+ }
+}
+
#define BTREE_MAX_DEPTH 4U
/* Btree nodes */
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index cf23ff47bed8..3a45d128f608 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -314,6 +314,12 @@ static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
return bkey_packed(k) ? format->key_u64s : BKEY_U64s;
}
+static inline bool bkeyp_u64s_valid(const struct bkey_format *f,
+ const struct bkey_packed *k)
+{
+ return ((unsigned) k->u64s - bkeyp_key_u64s(f, k) <= U8_MAX - BKEY_U64s);
+}
+
static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
const struct bkey_packed *k)
{
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 5e52684764eb..db336a43fc08 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -171,11 +171,15 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
if (type >= BKEY_TYPE_NR)
return 0;
- bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) &&
+ bkey_fsck_err_on((type == BKEY_TYPE_btree ||
+ (flags & BKEY_INVALID_COMMIT)) &&
!(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, err,
bkey_invalid_type_for_btree,
"invalid key type for btree %s (%s)",
- bch2_btree_node_type_str(type), bch2_bkey_types[k.k->type]);
+ bch2_btree_node_type_str(type),
+ k.k->type < KEY_TYPE_MAX
+ ? bch2_bkey_types[k.k->type]
+ : "(unknown)");
if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
bkey_fsck_err_on(k.k->size == 0, c, err,
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 3fd1085b6c61..3bb477840eab 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -134,18 +134,24 @@ void bch2_dump_btree_node_iter(struct btree *b,
printbuf_exit(&buf);
}
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-void __bch2_verify_btree_nr_keys(struct btree *b)
+struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
{
struct bset_tree *t;
struct bkey_packed *k;
- struct btree_nr_keys nr = { 0 };
+ struct btree_nr_keys nr = {};
for_each_bset(b, t)
bset_tree_for_each_key(b, t, k)
if (!bkey_deleted(k))
btree_keys_account_key_add(&nr, t - b->set, k);
+ return nr;
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+
+void __bch2_verify_btree_nr_keys(struct btree *b)
+{
+ struct btree_nr_keys nr = bch2_btree_node_count_keys(b);
BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
index 79c77baaa383..120a79fd456b 100644
--- a/fs/bcachefs/bset.h
+++ b/fs/bcachefs/bset.h
@@ -458,6 +458,8 @@ struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
/* Accounting: */
+struct btree_nr_keys bch2_btree_node_count_keys(struct btree *);
+
static inline void btree_keys_account_key(struct btree_nr_keys *n,
unsigned bset,
struct bkey_packed *k,
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 562561a9a510..02c70e813fac 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -709,9 +709,31 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- u32 seq;
- BUG_ON(level + 1 >= BTREE_MAX_DEPTH);
+ if (unlikely(level >= BTREE_MAX_DEPTH)) {
+ int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u",
+ level, BTREE_MAX_DEPTH);
+ return ERR_PTR(ret);
+ }
+
+ if (unlikely(!bkey_is_btree_ptr(&k->k))) {
+ struct printbuf buf = PRINTBUF;
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+
+ int ret = bch2_fs_topology_error(c, "attempting to get btree node with non-btree key %s", buf.buf);
+ printbuf_exit(&buf);
+ return ERR_PTR(ret);
+ }
+
+ if (unlikely(k->k.u64s > BKEY_BTREE_PTR_U64s_MAX)) {
+ struct printbuf buf = PRINTBUF;
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+
+ int ret = bch2_fs_topology_error(c, "attempting to get btree node with too big key %s", buf.buf);
+ printbuf_exit(&buf);
+ return ERR_PTR(ret);
+ }
+
/*
* Parent node must be locked, else we could read in a btree node that's
* been freed:
@@ -752,34 +774,26 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
}
set_btree_node_read_in_flight(b);
-
six_unlock_write(&b->c.lock);
- seq = six_lock_seq(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- /* Unlock before doing IO: */
- if (path && sync)
- bch2_trans_unlock_noassert(trans);
-
- bch2_btree_node_read(trans, b, sync);
+ if (path) {
+ u32 seq = six_lock_seq(&b->c.lock);
- if (!sync)
- return NULL;
+ /* Unlock before doing IO: */
+ six_unlock_intent(&b->c.lock);
+ bch2_trans_unlock_noassert(trans);
- if (path) {
- int ret = bch2_trans_relock(trans) ?:
- bch2_btree_path_relock_intent(trans, path);
- if (ret) {
- BUG_ON(!trans->restarted);
- return ERR_PTR(ret);
- }
- }
+ bch2_btree_node_read(trans, b, sync);
- if (!six_relock_type(&b->c.lock, lock_type, seq)) {
- BUG_ON(!path);
+ if (!sync)
+ return NULL;
- trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
- return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
+ if (!six_relock_type(&b->c.lock, lock_type, seq))
+ b = NULL;
+ } else {
+ bch2_btree_node_read(trans, b, sync);
+ if (lock_type == SIX_LOCK_read)
+ six_lock_downgrade(&b->c.lock);
}
return b;
@@ -808,7 +822,8 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
prt_printf(&buf, "\nmax ");
bch2_bpos_to_text(&buf, b->data->max_key);
- bch2_fs_inconsistent(c, "%s", buf.buf);
+ bch2_fs_topology_error(c, "%s", buf.buf);
+
printbuf_exit(&buf);
}
@@ -1111,18 +1126,19 @@ int bch2_btree_node_prefetch(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
BUG_ON(path && !btree_node_locked(path, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH);
- b = btree_cache_find(bc, k);
+ struct btree *b = btree_cache_find(bc, k);
if (b)
return 0;
b = bch2_btree_node_fill(trans, path, k, btree_id,
level, SIX_LOCK_read, false);
- return PTR_ERR_OR_ZERO(b);
+ if (!IS_ERR_OR_NULL(b))
+ six_unlock_read(&b->c.lock);
+ return bch2_trans_relock(trans) ?: PTR_ERR_OR_ZERO(b);
}
void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
@@ -1134,6 +1150,8 @@ void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
b = btree_cache_find(bc, k);
if (!b)
return;
+
+ BUG_ON(b == btree_node_root(trans->c, b));
wait_on_io:
/* not allowed to wait on io with btree locks held: */
@@ -1145,6 +1163,8 @@ wait_on_io:
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
+ if (unlikely(b->hash_val != btree_ptr_hash_val(k)))
+ goto out;
if (btree_node_dirty(b)) {
__bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
@@ -1159,7 +1179,7 @@ wait_on_io:
btree_node_data_free(c, b);
bch2_btree_node_hash_remove(bc, b);
mutex_unlock(&bc->lock);
-
+out:
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
}
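
Two defensive patterns show up in this file: `bch2_btree_node_fill()` validates level, key type, and key size up front and turns violations into topology errors instead of `BUG_ON()`s, and `bch2_btree_node_evict()` rechecks `b->hash_val` after reacquiring locks, since the cache slot may have been reused for a different key in the meantime. The recheck-under-lock shape, sketched with stand-in types (illustrative only):

    #include <stdio.h>

    struct node { unsigned long hash_val; };

    static void do_evict(struct node *b) { printf("evicting %lu\n", b->hash_val); }

    static int evict_if_still_ours(struct node *b, unsigned long want)
    {
        /* lock(b) would happen here; the earlier lookup ran unlocked */
        if (b->hash_val != want)
            return 0;           /* slot reused for another key: bail */
        do_evict(b);
        return 1;               /* unlock(b) on the way out */
    }

    int main(void)
    {
        struct node b = { .hash_val = 7 };

        evict_if_still_ours(&b, 7);  /* still ours: evicted */
        b.hash_val = 9;
        evict_if_still_ours(&b, 7);  /* reused: left alone  */
        return 0;
    }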
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index bdaed29f084a..791470b0c654 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -7,11 +7,13 @@
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
+#include "backpointers.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
+#include "btree_node_scan.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_gc.h"
@@ -24,7 +26,7 @@
#include "journal.h"
#include "keylist.h"
#include "move.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "reflink.h"
#include "replicas.h"
#include "super-io.h"
@@ -40,6 +42,7 @@
#define DROP_THIS_NODE 10
#define DROP_PREV_NODE 11
+#define DID_FILL_FROM_SCAN 12
static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
{
@@ -70,90 +73,6 @@ static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
__gc_pos_set(c, new_pos);
}
-/*
- * Missing: if an interior btree node is empty, we need to do something -
- * perhaps just kill it
- */
-static int bch2_gc_check_topology(struct bch_fs *c,
- struct btree *b,
- struct bkey_buf *prev,
- struct bkey_buf cur,
- bool is_last)
-{
- struct bpos node_start = b->data->min_key;
- struct bpos node_end = b->data->max_key;
- struct bpos expected_start = bkey_deleted(&prev->k->k)
- ? node_start
- : bpos_successor(prev->k->k.p);
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
- int ret = 0;
-
- if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
- struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
-
- if (!bpos_eq(expected_start, bp->v.min_key)) {
- bch2_topology_error(c);
-
- if (bkey_deleted(&prev->k->k)) {
- prt_printf(&buf1, "start of node: ");
- bch2_bpos_to_text(&buf1, node_start);
- } else {
- bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(prev->k));
- }
- bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
-
- if (__fsck_err(c,
- FSCK_CAN_FIX|
- FSCK_CAN_IGNORE|
- FSCK_NO_RATELIMIT,
- btree_node_topology_bad_min_key,
- "btree node with incorrect min_key at btree %s level %u:\n"
- " prev %s\n"
- " cur %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf) && should_restart_for_topology_repair(c)) {
- bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
- goto err;
- } else {
- set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
- }
- }
- }
-
- if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
- bch2_topology_error(c);
-
- printbuf_reset(&buf1);
- printbuf_reset(&buf2);
-
- bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
- bch2_bpos_to_text(&buf2, node_end);
-
- if (__fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE|FSCK_NO_RATELIMIT,
- btree_node_topology_bad_max_key,
- "btree node with incorrect max_key at btree %s level %u:\n"
- " %s\n"
- " expected %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf) &&
- should_restart_for_topology_repair(c)) {
- bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
- goto err;
- } else {
- set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
- }
- }
-
- bch2_bkey_buf_copy(prev, c, cur.k);
-err:
-fsck_err:
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
- return ret;
-}
-
static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
{
switch (b->key.k.type) {
@@ -212,6 +131,17 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
struct bkey_i_btree_ptr_v2 *new;
int ret;
+ if (c->opts.verbose) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ prt_str(&buf, " -> ");
+ bch2_bpos_to_text(&buf, new_min);
+
+ bch_info(c, "%s(): %s", __func__, buf.buf);
+ printbuf_exit(&buf);
+ }
+
new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
if (!new)
return -BCH_ERR_ENOMEM_gc_repair_key;
@@ -237,6 +167,17 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
struct bkey_i_btree_ptr_v2 *new;
int ret;
+ if (c->opts.verbose) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ prt_str(&buf, " -> ");
+ bch2_bpos_to_text(&buf, new_max);
+
+ bch_info(c, "%s(): %s", __func__, buf.buf);
+ printbuf_exit(&buf);
+ }
+
ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
if (ret)
return ret;
@@ -268,127 +209,138 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
return 0;
}
-static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
- struct btree *prev, struct btree *cur)
+static int btree_check_node_boundaries(struct bch_fs *c, struct btree *b,
+ struct btree *prev, struct btree *cur,
+ struct bpos *pulled_from_scan)
{
struct bpos expected_start = !prev
? b->data->min_key
: bpos_successor(prev->key.k.p);
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
- if (!prev) {
- prt_printf(&buf1, "start of node: ");
- bch2_bpos_to_text(&buf1, b->data->min_key);
- } else {
- bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&prev->key));
+ BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
+ b->data->min_key));
+
+ if (bpos_eq(expected_start, cur->data->min_key))
+ return 0;
+
+ prt_printf(&buf, " at btree %s level %u:\n parent: ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+ if (prev) {
+ prt_printf(&buf, "\n prev: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&prev->key));
}
- bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
-
- if (prev &&
- bpos_gt(expected_start, cur->data->min_key) &&
- BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
- /* cur overwrites prev: */
-
- if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
- cur->data->min_key), c,
- btree_node_topology_overwritten_by_next_node,
- "btree node overwritten by next node at btree %s level %u:\n"
- " node %s\n"
- " next %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf)) {
- ret = DROP_PREV_NODE;
- goto out;
- }
+ prt_str(&buf, "\n next: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&cur->key));
- if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
- bpos_predecessor(cur->data->min_key)), c,
- btree_node_topology_bad_max_key,
- "btree node with incorrect max_key at btree %s level %u:\n"
- " node %s\n"
- " next %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf))
- ret = set_node_max(c, prev,
- bpos_predecessor(cur->data->min_key));
- } else {
- /* prev overwrites cur: */
-
- if (mustfix_fsck_err_on(bpos_ge(expected_start,
- cur->data->max_key), c,
- btree_node_topology_overwritten_by_prev_node,
- "btree node overwritten by prev node at btree %s level %u:\n"
- " prev %s\n"
- " node %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf)) {
- ret = DROP_THIS_NODE;
- goto out;
- }
+ if (bpos_lt(expected_start, cur->data->min_key)) { /* gap */
+ if (b->c.level == 1 &&
+ bpos_lt(*pulled_from_scan, cur->data->min_key)) {
+ ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
+ expected_start,
+ bpos_predecessor(cur->data->min_key));
+ if (ret)
+ goto err;
- if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
- btree_node_topology_bad_min_key,
- "btree node with incorrect min_key at btree %s level %u:\n"
- " prev %s\n"
- " node %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf))
- ret = set_node_min(c, cur, expected_start);
+ *pulled_from_scan = cur->data->min_key;
+ ret = DID_FILL_FROM_SCAN;
+ } else {
+ if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
+ "btree node with incorrect min_key%s", buf.buf))
+ ret = set_node_min(c, cur, expected_start);
+ }
+ } else { /* overlap */
+ if (prev && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) { /* cur overwrites prev */
+ if (bpos_ge(prev->data->min_key, cur->data->min_key)) { /* fully? */
+ if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_next_node,
+ "btree node overwritten by next node%s", buf.buf))
+ ret = DROP_PREV_NODE;
+ } else {
+ if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
+ "btree node with incorrect max_key%s", buf.buf))
+ ret = set_node_max(c, prev,
+ bpos_predecessor(cur->data->min_key));
+ }
+ } else {
+ if (bpos_ge(expected_start, cur->data->max_key)) { /* fully? */
+ if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_prev_node,
+ "btree node overwritten by prev node%s", buf.buf))
+ ret = DROP_THIS_NODE;
+ } else {
+ if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
+ "btree node with incorrect min_key%s", buf.buf))
+ ret = set_node_min(c, cur, expected_start);
+ }
+ }
}
-out:
+err:
fsck_err:
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
+ printbuf_exit(&buf);
return ret;
}
static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
- struct btree *child)
+ struct btree *child, struct bpos *pulled_from_scan)
{
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
- bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
- bch2_bpos_to_text(&buf2, b->key.k.p);
+ if (bpos_eq(child->key.k.p, b->key.k.p))
+ return 0;
- if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
- btree_node_topology_bad_max_key,
- "btree node with incorrect max_key at btree %s level %u:\n"
- " %s\n"
- " expected %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf)) {
- ret = set_node_max(c, child, b->key.k.p);
- if (ret)
- goto err;
+ prt_printf(&buf, "at btree %s level %u:\n parent: ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+ prt_str(&buf, "\n child: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&child->key));
+
+ if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
+ "btree node with incorrect max_key%s", buf.buf)) {
+ if (b->c.level == 1 &&
+ bpos_lt(*pulled_from_scan, b->key.k.p)) {
+ ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
+ bpos_successor(child->key.k.p), b->key.k.p);
+ if (ret)
+ goto err;
+
+ *pulled_from_scan = b->key.k.p;
+ ret = DID_FILL_FROM_SCAN;
+ } else {
+ ret = set_node_max(c, child, b->key.k.p);
+ }
}
err:
fsck_err:
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
+ printbuf_exit(&buf);
return ret;
}
-static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b)
+static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b,
+ struct bpos *pulled_from_scan)
{
struct bch_fs *c = trans->c;
struct btree_and_journal_iter iter;
struct bkey_s_c k;
struct bkey_buf prev_k, cur_k;
struct btree *prev = NULL, *cur = NULL;
- bool have_child, dropped_children = false;
+ bool have_child, new_pass = false;
struct printbuf buf = PRINTBUF;
int ret = 0;
if (!b->c.level)
return 0;
-again:
- prev = NULL;
- have_child = dropped_children = false;
+
bch2_bkey_buf_init(&prev_k);
bch2_bkey_buf_init(&cur_k);
+again:
+ cur = prev = NULL;
+ have_child = new_pass = false;
bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
iter.prefetch = true;
@@ -415,11 +367,17 @@ again:
b->c.level - 1,
buf.buf)) {
bch2_btree_node_evict(trans, cur_k.k);
+ cur = NULL;
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
- cur = NULL;
if (ret)
break;
+
+ if (!btree_id_is_alloc(b->c.btree_id)) {
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ if (ret)
+ break;
+ }
continue;
}
@@ -427,7 +385,23 @@ again:
if (ret)
break;
- ret = btree_repair_node_boundaries(c, b, prev, cur);
+ if (bch2_btree_node_is_stale(c, cur)) {
+ bch_info(c, "btree node %s older than nodes found by scanning", buf.buf);
+ six_unlock_read(&cur->c.lock);
+ bch2_btree_node_evict(trans, cur_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur_k.k->k.p);
+ cur = NULL;
+ if (ret)
+ break;
+ continue;
+ }
+
+ ret = btree_check_node_boundaries(c, b, prev, cur, pulled_from_scan);
+ if (ret == DID_FILL_FROM_SCAN) {
+ new_pass = true;
+ ret = 0;
+ }
if (ret == DROP_THIS_NODE) {
six_unlock_read(&cur->c.lock);
@@ -445,6 +419,7 @@ again:
prev = NULL;
if (ret == DROP_PREV_NODE) {
+ bch_info(c, "dropped prev node");
bch2_btree_node_evict(trans, prev_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, prev_k.k->k.p);
@@ -452,8 +427,6 @@ again:
break;
bch2_btree_and_journal_iter_exit(&iter);
- bch2_bkey_buf_exit(&prev_k, c);
- bch2_bkey_buf_exit(&cur_k, c);
goto again;
} else if (ret)
break;
@@ -465,7 +438,11 @@ again:
if (!ret && !IS_ERR_OR_NULL(prev)) {
BUG_ON(cur);
- ret = btree_repair_node_end(c, b, prev);
+ ret = btree_repair_node_end(c, b, prev, pulled_from_scan);
+ if (ret == DID_FILL_FROM_SCAN) {
+ new_pass = true;
+ ret = 0;
+ }
}
if (!IS_ERR_OR_NULL(prev))
@@ -479,6 +456,10 @@ again:
goto err;
bch2_btree_and_journal_iter_exit(&iter);
+
+ if (new_pass)
+ goto again;
+
bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
iter.prefetch = true;
@@ -495,7 +476,7 @@ again:
if (ret)
goto err;
- ret = bch2_btree_repair_topology_recurse(trans, cur);
+ ret = bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan);
six_unlock_read(&cur->c.lock);
cur = NULL;
@@ -503,7 +484,7 @@ again:
bch2_btree_node_evict(trans, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
- dropped_children = true;
+ new_pass = true;
}
if (ret)
@@ -530,12 +511,14 @@ fsck_err:
six_unlock_read(&cur->c.lock);
bch2_btree_and_journal_iter_exit(&iter);
- bch2_bkey_buf_exit(&prev_k, c);
- bch2_bkey_buf_exit(&cur_k, c);
- if (!ret && dropped_children)
+ if (!ret && new_pass)
goto again;
+ BUG_ON(!ret && bch2_btree_node_check_topology(trans, b));
+
+ bch2_bkey_buf_exit(&prev_k, c);
+ bch2_bkey_buf_exit(&cur_k, c);
printbuf_exit(&buf);
return ret;
}
@@ -543,32 +526,63 @@ fsck_err:
int bch2_check_topology(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree *b;
- unsigned i;
+ struct bpos pulled_from_scan = POS_MIN;
int ret = 0;
- for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
+ for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
+ bool reconstructed_root = false;
- if (!r->alive)
- continue;
+ if (r->error) {
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ if (ret)
+ break;
+reconstruct_root:
+ bch_info(c, "btree root %s unreadable, must recover from scan", bch2_btree_id_str(i));
- b = r->b;
- if (btree_node_fake(b))
- continue;
+ r->alive = false;
+ r->error = 0;
+
+ if (!bch2_btree_has_scanned_nodes(c, i)) {
+ mustfix_fsck_err(c, btree_root_unreadable_and_scan_found_nothing,
+ "no nodes found for btree %s, continue?", bch2_btree_id_str(i));
+ bch2_btree_root_alloc_fake(c, i, 0);
+ } else {
+ bch2_btree_root_alloc_fake(c, i, 1);
+ bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
+ if (ret)
+ break;
+ }
+
+ reconstructed_root = true;
+ }
+
+ struct btree *b = r->b;
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
- ret = bch2_btree_repair_topology_recurse(trans, b);
+ ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
six_unlock_read(&b->c.lock);
if (ret == DROP_THIS_NODE) {
- bch_err(c, "empty btree root - repair unimplemented");
- ret = -BCH_ERR_fsck_repair_unimplemented;
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+ mutex_lock(&c->btree_cache.lock);
+ list_move(&b->list, &c->btree_cache.freeable);
+ mutex_unlock(&c->btree_cache.lock);
+
+ r->b = NULL;
+
+ if (!reconstructed_root)
+ goto reconstruct_root;
+
+ bch_err(c, "empty btree root %s", bch2_btree_id_str(i));
+ bch2_btree_root_alloc_fake(c, i, 0);
+ r->alive = false;
+ ret = 0;
}
}
-
+fsck_err:
bch2_trans_put(trans);
-
return ret;
}
@@ -591,7 +605,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry_c->ptr);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, p, entry_c);
if (fsck_err_on(!g->gen_valid,
c, ptr_to_missing_alloc_key,
@@ -657,7 +671,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
continue;
if (fsck_err_on(bucket_data_type(g->data_type) &&
- bucket_data_type(g->data_type) != data_type, c,
+ bucket_data_type(g->data_type) !=
+ bucket_data_type(data_type), c,
ptr_bucket_data_type_mismatch,
"bucket %u:%zu different types of data in same bucket: %s, %s\n"
"while marking %s",
@@ -698,18 +713,13 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
}
if (do_update) {
- struct bkey_ptrs ptrs;
- union bch_extent_entry *entry;
- struct bch_extent_ptr *ptr;
- struct bkey_i *new;
-
if (is_root) {
bch_err(c, "cannot update btree roots yet");
ret = -EINVAL;
goto err;
}
- new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
+ struct bkey_i *new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
if (!new) {
ret = -BCH_ERR_ENOMEM_gc_repair_key;
bch_err_msg(c, ret, "allocating new key");
@@ -724,7 +734,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
* btree node isn't there anymore, the read path will
* sort it out:
*/
- ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_GC_BUCKET(ca, ptr);
@@ -732,19 +742,26 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
ptr->gen = g->gen;
}
} else {
- bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_GC_BUCKET(ca, ptr);
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);
-
- (ptr->cached &&
- (!g->gen_valid || gen_cmp(ptr->gen, g->gen) > 0)) ||
- (!ptr->cached &&
- gen_cmp(ptr->gen, g->gen) < 0) ||
- gen_cmp(g->gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
- (g->data_type &&
- g->data_type != data_type);
- }));
+ struct bkey_ptrs ptrs;
+ union bch_extent_entry *entry;
+restart_drop_ptrs:
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);
+
+ if ((p.ptr.cached &&
+ (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
+ (!p.ptr.cached &&
+ gen_cmp(p.ptr.gen, g->gen) < 0) ||
+ gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
+ (g->data_type &&
+ g->data_type != data_type)) {
+ bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
+ goto restart_drop_ptrs;
+ }
+ }
again:
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_extent_entry_for_each(ptrs, entry) {
@@ -774,12 +791,6 @@ found:
}
}
- ret = bch2_journal_key_insert_take(c, btree_id, level, new);
- if (ret) {
- kfree(new);
- goto err;
- }
-
if (level)
bch2_btree_node_update_key_early(trans, btree_id, level - 1, *k, new);
@@ -793,6 +804,12 @@ found:
bch_info(c, "new key %s", buf.buf);
}
+ ret = bch2_journal_key_insert_take(c, btree_id, level, new);
+ if (ret) {
+ kfree(new);
+ goto err;
+ }
+
*k = bkey_i_to_s_c(new);
}
err:
@@ -811,6 +828,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
struct bch_fs *c = trans->c;
struct bkey deleted = KEY(0, 0, 0);
struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
+ struct printbuf buf = PRINTBUF;
int ret = 0;
deleted.p = k->k->p;
@@ -819,10 +837,6 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
BUG_ON(bch2_journal_seq_verify &&
k->k->version.lo > atomic64_read(&c->journal.seq));
- ret = bch2_check_fix_ptrs(trans, btree_id, level, is_root, k);
- if (ret)
- goto err;
-
if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
bkey_version_in_future,
"key version number higher than recorded: %llu > %llu",
@@ -831,52 +845,57 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
atomic64_set(&c->key_version, k->k->version.lo);
}
+ ret = bch2_check_fix_ptrs(trans, btree_id, level, is_root, k);
+ if (ret)
+ goto err;
+
+ if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, *k),
+ c, btree_bitmap_not_marked,
+ "btree ptr not marked in member info btree allocated bitmap\n %s",
+ (bch2_bkey_val_to_text(&buf, c, *k),
+ buf.buf))) {
+ mutex_lock(&c->sb_lock);
+ bch2_dev_btree_bitmap_mark(c, *k);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ }
+
ret = commit_do(trans, NULL, NULL, 0,
- bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
+ bch2_key_trigger(trans, btree_id, level, old,
+ unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
fsck_err:
err:
+ printbuf_exit(&buf);
bch_err_fn(c, ret);
return ret;
}
static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
{
- struct bch_fs *c = trans->c;
struct btree_node_iter iter;
struct bkey unpacked;
struct bkey_s_c k;
- struct bkey_buf prev, cur;
int ret = 0;
+ ret = bch2_btree_node_check_topology(trans, b);
+ if (ret)
+ return ret;
+
if (!btree_node_type_needs_gc(btree_node_type(b)))
return 0;
bch2_btree_node_iter_init_from_start(&iter, b);
- bch2_bkey_buf_init(&prev);
- bch2_bkey_buf_init(&cur);
- bkey_init(&prev.k->k);
while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
&k, initial);
if (ret)
- break;
+ return ret;
bch2_btree_node_iter_advance(&iter, b);
-
- if (b->c.level) {
- bch2_bkey_buf_reassemble(&cur, c, k);
-
- ret = bch2_gc_check_topology(c, b, &prev, cur,
- bch2_btree_node_iter_end(&iter));
- if (ret)
- break;
- }
}
- bch2_bkey_buf_exit(&cur, c);
- bch2_bkey_buf_exit(&prev, c);
- return ret;
+ return 0;
}
static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
@@ -925,14 +944,16 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
struct bch_fs *c = trans->c;
struct btree_and_journal_iter iter;
struct bkey_s_c k;
- struct bkey_buf cur, prev;
+ struct bkey_buf cur;
struct printbuf buf = PRINTBUF;
int ret = 0;
+ ret = bch2_btree_node_check_topology(trans, b);
+ if (ret)
+ return ret;
+
bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
- bch2_bkey_buf_init(&prev);
bch2_bkey_buf_init(&cur);
- bkey_init(&prev.k->k);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
BUG_ON(bpos_lt(k.k->p, b->data->min_key));
@@ -943,20 +964,7 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
if (ret)
goto fsck_err;
- if (b->c.level) {
- bch2_bkey_buf_reassemble(&cur, c, k);
- k = bkey_i_to_s_c(cur.k);
-
- bch2_btree_and_journal_iter_advance(&iter);
-
- ret = bch2_gc_check_topology(c, b,
- &prev, cur,
- !bch2_btree_and_journal_iter_peek(&iter).k);
- if (ret)
- goto fsck_err;
- } else {
- bch2_btree_and_journal_iter_advance(&iter);
- }
+ bch2_btree_and_journal_iter_advance(&iter);
}
if (b->c.level > target_depth) {
@@ -1015,7 +1023,6 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
}
fsck_err:
bch2_bkey_buf_exit(&cur, c);
- bch2_bkey_buf_exit(&prev, c);
bch2_btree_and_journal_iter_exit(&iter);
printbuf_exit(&buf);
return ret;
@@ -1033,9 +1040,6 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
b = bch2_btree_id_root(c, btree_id)->b;
- if (btree_node_fake(b))
- return 0;
-
six_lock_read(&b->c.lock, NULL, NULL);
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->data->min_key);
@@ -1583,7 +1587,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
ret = PTR_ERR_OR_ZERO(new);
if (ret)
- return ret;
+ goto out;
if (!r->refcount)
new->k.type = KEY_TYPE_deleted;
@@ -1591,6 +1595,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
*bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
ret = bch2_trans_update(trans, iter, new, 0);
}
+out:
fsck_err:
printbuf_exit(&buf);
return ret;
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 34df8ccc5fec..debb0edc3455 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -654,6 +654,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
*/
bch2_bset_set_no_aux_tree(b, b->set);
bch2_btree_build_aux_trees(b);
+ b->nr = bch2_btree_node_count_keys(b);
struct bkey_s_c k;
struct bkey unpacked;
@@ -830,7 +831,7 @@ static int bset_key_invalid(struct bch_fs *c, struct btree *b,
(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
}
-static bool __bkey_valid(struct bch_fs *c, struct btree *b,
+static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
struct bset *i, struct bkey_packed *k)
{
if (bkey_p_next(k) > vstruct_last(i))
@@ -839,7 +840,7 @@ static bool __bkey_valid(struct bch_fs *c, struct btree *b,
if (k->format > KEY_FORMAT_CURRENT)
return false;
- if (k->u64s < bkeyp_key_u64s(&b->format, k))
+ if (!bkeyp_u64s_valid(&b->format, k))
return false;
struct printbuf buf = PRINTBUF;
@@ -883,11 +884,13 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
"invalid bkey format %u", k->format))
goto drop_this_key;
- if (btree_err_on(k->u64s < bkeyp_key_u64s(&b->format, k),
+ if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
-BCH_ERR_btree_node_read_err_fixable,
c, NULL, b, i,
btree_node_bkey_bad_u64s,
- "k->u64s too small (%u < %u)", k->u64s, bkeyp_key_u64s(&b->format, k)))
+ "bad k->u64s %u (min %u max %zu)", k->u64s,
+ bkeyp_key_u64s(&b->format, k),
+ U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
goto drop_this_key;
if (!write)
@@ -946,13 +949,12 @@ drop_this_key:
* do
*/
- if (!__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
+ if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
for (next_good_key = 1;
next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
next_good_key++)
- if (__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
+ if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
goto got_good_key;
-
}
/*
@@ -1263,10 +1265,12 @@ out:
return retry_read;
fsck_err:
if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
- ret == -BCH_ERR_btree_node_read_err_must_retry)
+ ret == -BCH_ERR_btree_node_read_err_must_retry) {
retry_read = 1;
- else
+ } else {
set_btree_node_read_error(b);
+ bch2_btree_lost_data(c, b->c.btree_id);
+ }
goto out;
}
@@ -1327,6 +1331,7 @@ start:
if (!can_retry) {
set_btree_node_read_error(b);
+ bch2_btree_lost_data(c, b->c.btree_id);
break;
}
}
@@ -1335,7 +1340,9 @@ start:
rb->start_time);
bio_put(&rb->bio);
- if (saw_error && !btree_node_read_error(b)) {
+ if (saw_error &&
+ !btree_node_read_error(b) &&
+ c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->key.k.p);
bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
@@ -1526,9 +1533,10 @@ fsck_err:
ret = -1;
}
- if (ret)
+ if (ret) {
set_btree_node_read_error(b);
- else if (*saw_error)
+ bch2_btree_lost_data(c, b->c.btree_id);
+ } else if (*saw_error)
bch2_btree_node_rewrite_async(c, b);
for (i = 0; i < ra->nr; i++) {
@@ -1657,13 +1665,14 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
prt_str(&buf, "btree node read error: no device to read from\n at ");
bch2_btree_pos_to_text(&buf, c, b);
- bch_err(c, "%s", buf.buf);
+ bch_err_ratelimited(c, "%s", buf.buf);
if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
bch2_fatal_error(c);
set_btree_node_read_error(b);
+ bch2_btree_lost_data(c, b->c.btree_id);
clear_btree_node_read_in_flight(b);
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
printbuf_exit(&buf);
@@ -1860,7 +1869,7 @@ static void btree_node_write_work(struct work_struct *work)
} else {
ret = bch2_trans_do(c, NULL, NULL, 0,
bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
- BCH_WATERMARK_reclaim|
+ BCH_WATERMARK_interior_updates|
BCH_TRANS_COMMIT_journal_reclaim|
BCH_TRANS_COMMIT_no_enospc|
BCH_TRANS_COMMIT_no_check_rw,
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 51bcdc6c6d1c..2a211a4bebd1 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -927,8 +927,22 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
if (ret)
goto err;
} else {
- bch2_bkey_buf_unpack(&tmp, c, l->b,
- bch2_btree_node_iter_peek(&l->iter, l->b));
+ struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
+ if (!k) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_str(&buf, "node not found at pos ");
+ bch2_bpos_to_text(&buf, path->pos);
+ prt_str(&buf, " within parent node ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
+
+ bch2_fs_fatal_error(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ ret = -BCH_ERR_btree_need_topology_repair;
+ goto err;
+ }
+
+ bch2_bkey_buf_unpack(&tmp, c, l->b, k);
if ((flags & BTREE_ITER_PREFETCH) &&
c->opts.btree_node_prefetch) {
@@ -962,7 +976,6 @@ err:
return ret;
}
-
static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
@@ -2790,6 +2803,31 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
struct btree_transaction_stats *s = btree_trans_stats(trans);
s->max_mem = max(s->max_mem, new_bytes);
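+ /*
+ * Memory that came from the mempool can't be krealloc()'d: allocate a
+ * fresh buffer, copy our data over, and return the item to the mempool:
+ */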
+ if (trans->used_mempool) {
+ if (trans->mem_bytes >= new_bytes)
+ goto out_change_top;
+
+ /* No more space in the mempool item, need to allocate a new buffer */
+ new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
+ if (unlikely(!new_mem)) {
+ bch2_trans_unlock(trans);
+
+ new_mem = kmalloc(new_bytes, GFP_KERNEL);
+ if (!new_mem)
+ return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
+
+ ret = bch2_trans_relock(trans);
+ if (ret) {
+ kfree(new_mem);
+ return ERR_PTR(ret);
+ }
+ }
+ memcpy(new_mem, trans->mem, trans->mem_top);
+ trans->used_mempool = false;
+ mempool_free(trans->mem, &c->btree_trans_mem_pool);
+ goto out_new_mem;
+ }
+
new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
if (unlikely(!new_mem)) {
bch2_trans_unlock(trans);
@@ -2798,6 +2836,8 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
new_bytes = BTREE_TRANS_MEM_MAX;
+ memcpy(new_mem, trans->mem, trans->mem_top);
+ trans->used_mempool = true;
kfree(trans->mem);
}
@@ -2811,7 +2851,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
if (ret)
return ERR_PTR(ret);
}
-
+out_new_mem:
trans->mem = new_mem;
trans->mem_bytes = new_bytes;
@@ -2819,7 +2859,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
}
-
+out_change_top:
p = trans->mem + trans->mem_top;
trans->mem_top += size;
memset(p, 0, size);
@@ -3093,7 +3133,7 @@ void bch2_trans_put(struct btree_trans *trans)
if (paths_allocated != trans->_paths_allocated)
kvfree_rcu_mightsleep(paths_allocated);
- if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
+ if (trans->used_mempool)
mempool_free(trans->mem, &c->btree_trans_mem_pool);
else
kfree(trans->mem);
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 24772538e4cc..1c70836dd7cc 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -498,8 +498,13 @@ static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
struct btree_trans *trans = iter->trans;
- if (!trans->restarted)
- btree_iter_path(trans, iter)->preserve = false;
+ if (!iter->path || trans->restarted)
+ return;
+
+ struct btree_path *path = btree_iter_path(trans, iter);
+ path->preserve = false;
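+ /* if ours is the only reference, the path no longer needs to stay locked: */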
+ if (path->ref == 1)
+ path->should_be_locked = false;
}
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
@@ -642,7 +647,7 @@ int __bch2_btree_trans_too_many_iters(struct btree_trans *);
static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
- if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_INITIAL - 8)
+ if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
return __bch2_btree_trans_too_many_iters(trans);
return 0;
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index 50e04356d72c..1e8cf49a6935 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -130,12 +130,30 @@ struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree
return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
}
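+
+/*
+ * journal_keys is a gap buffer: assert that the iterator doesn't point into
+ * the gap, and that the key it points at doesn't sort before this iterator's
+ * btree/level:
+ */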
+static void journal_iter_verify(struct journal_iter *iter)
+{
+ struct journal_keys *keys = iter->keys;
+ size_t gap_size = keys->size - keys->nr;
+
+ BUG_ON(iter->idx >= keys->gap &&
+ iter->idx < keys->gap + gap_size);
+
+ if (iter->idx < keys->size) {
+ struct journal_key *k = keys->data + iter->idx;
+
+ int cmp = cmp_int(k->btree_id, iter->btree_id) ?:
+ cmp_int(k->level, iter->level);
+ BUG_ON(cmp < 0);
+ }
+}
+
static void journal_iters_fix(struct bch_fs *c)
{
struct journal_keys *keys = &c->journal_keys;
/* The key we just inserted is immediately before the gap: */
size_t gap_end = keys->gap + (keys->size - keys->nr);
- struct btree_and_journal_iter *iter;
+ struct journal_key *new_key = &keys->data[keys->gap - 1];
+ struct journal_iter *iter;
/*
* If an iterator points one after the key we just inserted, decrement
@@ -143,9 +161,14 @@ static void journal_iters_fix(struct bch_fs *c)
* decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
* handle that:
*/
- list_for_each_entry(iter, &c->journal_iters, journal.list)
- if (iter->journal.idx == gap_end)
- iter->journal.idx = keys->gap - 1;
+ list_for_each_entry(iter, &c->journal_iters, list) {
+ journal_iter_verify(iter);
+ if (iter->idx == gap_end &&
+ new_key->btree_id == iter->btree_id &&
+ new_key->level == iter->level)
+ iter->idx = keys->gap - 1;
+ journal_iter_verify(iter);
+ }
}
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
@@ -192,7 +215,12 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
if (idx > keys->gap)
idx -= keys->size - keys->nr;
+ size_t old_gap = keys->gap;
+
if (keys->nr == keys->size) {
+ journal_iters_move_gap(c, old_gap, keys->size);
+ old_gap = keys->size;
+
struct journal_keys new_keys = {
.nr = keys->nr,
.size = max_t(size_t, keys->size, 8) * 2,
@@ -216,7 +244,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
keys->gap = keys->nr;
}
- journal_iters_move_gap(c, keys->gap, idx);
+ journal_iters_move_gap(c, old_gap, idx);
move_gap(keys, idx);
@@ -261,6 +289,22 @@ int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
return bch2_journal_key_insert(c, id, level, &whiteout);
}
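+
+/*
+ * Returns true (while journal replay is still running) if the journal
+ * contains a whiteout at this position, i.e. the key was deleted:
+ */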
+bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree,
+ unsigned level, struct bpos pos)
+{
+ struct journal_keys *keys = &trans->c->journal_keys;
+ size_t idx = bch2_journal_key_search(keys, btree, level, pos);
+
+ if (!trans->journal_replay_not_finished)
+ return false;
+
+ return (idx < keys->size &&
+ keys->data[idx].btree_id == btree &&
+ keys->data[idx].level == level &&
+ bpos_eq(keys->data[idx].k->k.p, pos) &&
+ bkey_deleted(&keys->data[idx].k->k));
+}
+
void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
unsigned level, struct bpos pos)
{
@@ -285,16 +329,21 @@ static void bch2_journal_iter_advance(struct journal_iter *iter)
static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
- struct journal_key *k = iter->keys->data + iter->idx;
+ journal_iter_verify(iter);
+
+ while (iter->idx < iter->keys->size) {
+ struct journal_key *k = iter->keys->data + iter->idx;
+
+ int cmp = cmp_int(k->btree_id, iter->btree_id) ?:
+ cmp_int(k->level, iter->level);
+ if (cmp > 0)
+ break;
+ BUG_ON(cmp);
- while (k < iter->keys->data + iter->keys->size &&
- k->btree_id == iter->btree_id &&
- k->level == iter->level) {
if (!k->overwritten)
return bkey_i_to_s_c(k->k);
bch2_journal_iter_advance(iter);
- k = iter->keys->data + iter->idx;
}
return bkey_s_c_null;
@@ -314,6 +363,8 @@ static void bch2_journal_iter_init(struct bch_fs *c,
iter->level = level;
iter->keys = &c->journal_keys;
iter->idx = bch2_journal_key_search(&c->journal_keys, id, level, pos);
+
+ journal_iter_verify(iter);
}
static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
@@ -363,7 +414,7 @@ static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
- struct bkey_s_c btree_k, journal_k, ret;
+ struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret;
if (iter->prefetch && iter->journal.level)
btree_and_journal_iter_prefetch(iter);
@@ -375,9 +426,10 @@ again:
bpos_lt(btree_k.k->p, iter->pos))
bch2_journal_iter_advance_btree(iter);
- while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
- bpos_lt(journal_k.k->p, iter->pos))
- bch2_journal_iter_advance(&iter->journal);
+ if (iter->trans->journal_replay_not_finished)
+ while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
+ bpos_lt(journal_k.k->p, iter->pos))
+ bch2_journal_iter_advance(&iter->journal);
ret = journal_k.k &&
(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
@@ -417,10 +469,15 @@ void __bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
iter->trans = trans;
iter->b = b;
iter->node_iter = node_iter;
- bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos);
- INIT_LIST_HEAD(&iter->journal.list);
iter->pos = b->data->min_key;
iter->at_end = false;
+ INIT_LIST_HEAD(&iter->journal.list);
+
+ if (trans->journal_replay_not_finished) {
+ bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos);
+ if (!test_bit(BCH_FS_may_go_rw, &trans->c->flags))
+ list_add(&iter->journal.list, &trans->c->journal_iters);
+ }
}
/*
@@ -435,7 +492,6 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
bch2_btree_node_iter_init_from_start(&node_iter, b);
__bch2_btree_and_journal_iter_init_node_iter(trans, iter, b, node_iter, b->data->min_key);
- list_add(&iter->journal.list, &trans->c->journal_iters);
}
/* sort and dedup all keys in the journal: */
@@ -548,3 +604,22 @@ int bch2_journal_keys_sort(struct bch_fs *c)
bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr);
return 0;
}
+
+void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree,
+ unsigned level_min, unsigned level_max,
+ struct bpos start, struct bpos end)
+{
+ struct journal_keys *keys = &c->journal_keys;
+ size_t dst = 0;
+
+ move_gap(keys, keys->nr);
+
+ darray_for_each(*keys, i)
+ if (!(i->btree_id == btree &&
+ i->level >= level_min &&
+ i->level <= level_max &&
+ bpos_ge(i->k->k.p, start) &&
+ bpos_le(i->k->k.p, end)))
+ keys->data[dst++] = *i;
+ keys->nr = keys->gap = dst;
+}
diff --git a/fs/bcachefs/btree_journal_iter.h b/fs/bcachefs/btree_journal_iter.h
index c9d19da3ea04..af25046ebcaa 100644
--- a/fs/bcachefs/btree_journal_iter.h
+++ b/fs/bcachefs/btree_journal_iter.h
@@ -40,8 +40,8 @@ int bch2_journal_key_insert(struct bch_fs *, enum btree_id,
unsigned, struct bkey_i *);
int bch2_journal_key_delete(struct bch_fs *, enum btree_id,
unsigned, struct bpos);
-void bch2_journal_key_overwritten(struct bch_fs *, enum btree_id,
- unsigned, struct bpos);
+bool bch2_key_deleted_in_journal(struct btree_trans *, enum btree_id, unsigned, struct bpos);
+void bch2_journal_key_overwritten(struct bch_fs *, enum btree_id, unsigned, struct bpos);
void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *);
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *);
@@ -66,4 +66,8 @@ void bch2_journal_entries_free(struct bch_fs *);
int bch2_journal_keys_sort(struct bch_fs *);
+void bch2_shoot_down_journal_keys(struct bch_fs *, enum btree_id,
+ unsigned, unsigned,
+ struct bpos, struct bpos);
+
#endif /* _BCACHEFS_BTREE_JOURNAL_ITER_H */
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 581edcb0911b..e8c1c530cd95 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -169,6 +169,7 @@ static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
} else {
mutex_lock(&bc->lock);
list_move_tail(&ck->list, &bc->freed_pcpu);
+ bc->nr_freed_pcpu++;
mutex_unlock(&bc->lock);
}
}
@@ -245,6 +246,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
if (!list_empty(&bc->freed_pcpu)) {
ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
list_del_init(&ck->list);
+ bc->nr_freed_pcpu--;
}
mutex_unlock(&bc->lock);
}
@@ -659,7 +661,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
commit_flags |= BCH_WATERMARK_reclaim;
if (ck->journal.seq != journal_last_seq(j) ||
- j->watermark == BCH_WATERMARK_stripe)
+ !test_bit(JOURNAL_SPACE_LOW, &c->journal.flags))
commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
ret = bch2_btree_iter_traverse(&b_iter) ?:
@@ -840,8 +842,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
* Newest freed entries are at the end of the list - once we hit one
* that's too new to be freed, we can bail out:
*/
- scanned += bc->nr_freed_nonpcpu;
-
list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
ck->btree_trans_barrier_seq))
@@ -855,11 +855,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
bc->nr_freed_nonpcpu--;
}
- if (scanned >= nr)
- goto out;
-
- scanned += bc->nr_freed_pcpu;
-
list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
ck->btree_trans_barrier_seq))
@@ -873,9 +868,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
bc->nr_freed_pcpu--;
}
- if (scanned >= nr)
- goto out;
-
rcu_read_lock();
tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
if (bc->shrink_iter >= tbl->size)
@@ -891,12 +883,12 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
ck = container_of(pos, struct bkey_cached, hash);
- if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
goto next;
-
- if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
+ } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
- else if (bkey_cached_lock_for_evict(ck)) {
+ goto next;
+ } else if (bkey_cached_lock_for_evict(ck)) {
bkey_cached_evict(bc, ck);
bkey_cached_free(bc, ck);
}
@@ -914,7 +906,6 @@ next:
} while (scanned < nr && bc->shrink_iter != start);
rcu_read_unlock();
-out:
memalloc_nofs_restore(flags);
srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
mutex_unlock(&bc->lock);
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index b9b151e693ed..f2caf491957e 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -440,33 +440,7 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
struct btree_path *path,
struct btree_bkey_cached_common *b)
{
- struct btree_path *linked;
- unsigned i, iter;
- int ret;
-
- /*
- * XXX BIG FAT NOTICE
- *
- * Drop all read locks before taking a write lock:
- *
- * This is a hack, because bch2_btree_node_lock_write_nofail() is a
- * hack - but by dropping read locks first, this should never fail, and
- * we only use this in code paths where whatever read locks we've
- * already taken are no longer needed:
- */
-
- trans_for_each_path(trans, linked, iter) {
- if (!linked->nodes_locked)
- continue;
-
- for (i = 0; i < BTREE_MAX_DEPTH; i++)
- if (btree_node_read_locked(linked, i)) {
- btree_node_unlock(trans, linked, i);
- btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
- }
- }
-
- ret = __btree_node_lock_write(trans, path, b, true);
+ int ret = __btree_node_lock_write(trans, path, b, true);
BUG_ON(ret);
}
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
new file mode 100644
index 000000000000..c60794264da2
--- /dev/null
+++ b/fs/bcachefs/btree_node_scan.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+
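+/*
+ * Btree node scan, for repairing catastrophic damage: read every bucket that
+ * might contain btree nodes (consulting the member info btree-allocated
+ * bitmap, when present), record every valid node found, and use the results
+ * to reconstruct missing interior nodes - see bch2_get_scanned_nodes().
+ */
+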
+#include "bcachefs.h"
+#include "btree_cache.h"
+#include "btree_io.h"
+#include "btree_journal_iter.h"
+#include "btree_node_scan.h"
+#include "btree_update_interior.h"
+#include "buckets.h"
+#include "error.h"
+#include "journal_io.h"
+#include "recovery_passes.h"
+
+#include <linux/kthread.h>
+#include <linux/sort.h>
+
+struct find_btree_nodes_worker {
+ struct closure *cl;
+ struct find_btree_nodes *f;
+ struct bch_dev *ca;
+};
+
+static void found_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct found_btree_node *n)
+{
+ prt_printf(out, "%s l=%u seq=%u cookie=%llx ", bch2_btree_id_str(n->btree_id), n->level, n->seq, n->cookie);
+ bch2_bpos_to_text(out, n->min_key);
+ prt_str(out, "-");
+ bch2_bpos_to_text(out, n->max_key);
+
+ if (n->range_updated)
+ prt_str(out, " range updated");
+ if (n->overwritten)
+ prt_str(out, " overwritten");
+
+ for (unsigned i = 0; i < n->nr_ptrs; i++) {
+ prt_char(out, ' ');
+ bch2_extent_ptr_to_text(out, c, n->ptrs + i);
+ }
+}
+
+static void found_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c, found_btree_nodes nodes)
+{
+ printbuf_indent_add(out, 2);
+ darray_for_each(nodes, i) {
+ found_btree_node_to_text(out, c, i);
+ prt_newline(out);
+ }
+ printbuf_indent_sub(out, 2);
+}
+
+static void found_btree_node_to_key(struct bkey_i *k, const struct found_btree_node *f)
+{
+ struct bkey_i_btree_ptr_v2 *bp = bkey_btree_ptr_v2_init(k);
+
+ set_bkey_val_u64s(&bp->k, sizeof(struct bch_btree_ptr_v2) / sizeof(u64) + f->nr_ptrs);
+ bp->k.p = f->max_key;
+ bp->v.seq = cpu_to_le64(f->cookie);
+ bp->v.sectors_written = 0;
+ bp->v.flags = 0;
+ bp->v.min_key = f->min_key;
+ SET_BTREE_PTR_RANGE_UPDATED(&bp->v, f->range_updated);
+ memcpy(bp->v.start, f->ptrs, sizeof(struct bch_extent_ptr) * f->nr_ptrs);
+}
+
+static bool found_btree_node_is_readable(struct btree_trans *trans,
+ const struct found_btree_node *f)
+{
+ struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } k;
+
+ found_btree_node_to_key(&k.k, f);
+
+ struct btree *b = bch2_btree_node_get_noiter(trans, &k.k, f->btree_id, f->level, false);
+ bool ret = !IS_ERR_OR_NULL(b);
+ if (ret)
+ six_unlock_read(&b->c.lock);
+
+ /*
+ * We might update this node's range; if that happens, we need the node
+ * to be re-read so the read path can trim keys that are no longer in
+ * this node
+ */
+ if (!IS_ERR_OR_NULL(b) && b != btree_node_root(trans->c, b))
+ bch2_btree_node_evict(trans, &k.k);
+ return ret;
+}
+
+static int found_btree_node_cmp_cookie(const void *_l, const void *_r)
+{
+ const struct found_btree_node *l = _l;
+ const struct found_btree_node *r = _r;
+
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ cmp_int(l->level, r->level) ?:
+ cmp_int(l->cookie, r->cookie);
+}
+
+/*
+ * Given two found btree nodes, if their sequence numbers are equal, take the
+ * one that's readable:
+ */
+static int found_btree_node_cmp_time(const struct found_btree_node *l,
+ const struct found_btree_node *r)
+{
+ return cmp_int(l->seq, r->seq);
+}
+
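+/*
+ * Sort order used for overwrite resolution: btree id, then descending level,
+ * then start position, with the newest node (highest seq) first:
+ */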
+static int found_btree_node_cmp_pos(const void *_l, const void *_r)
+{
+ const struct found_btree_node *l = _l;
+ const struct found_btree_node *r = _r;
+
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ -cmp_int(l->level, r->level) ?:
+ bpos_cmp(l->min_key, r->min_key) ?:
+ -found_btree_node_cmp_time(l, r);
+}
+
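+/*
+ * Read a candidate node header at @offset: if the magic matches and the node
+ * proves readable, record it along with its pointer's current bucket gen:
+ */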
+static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
+ struct bio *bio, struct btree_node *bn, u64 offset)
+{
+ struct bch_fs *c = container_of(f, struct bch_fs, found_btree_nodes);
+
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ);
+ bio->bi_iter.bi_sector = offset;
+ bch2_bio_map(bio, bn, PAGE_SIZE);
+
+ submit_bio_wait(bio);
+ if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
+ "IO error in try_read_btree_node() at %llu: %s",
+ offset, bch2_blk_status_to_str(bio->bi_status)))
+ return;
+
+ if (le64_to_cpu(bn->magic) != bset_magic(c))
+ return;
+
+ if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(&bn->keys))) {
+ struct nonce nonce = btree_nonce(&bn->keys, 0);
+ unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
+
+ bch2_encrypt(c, BSET_CSUM_TYPE(&bn->keys), nonce, &bn->flags, bytes);
+ }
+
+ if (btree_id_is_alloc(BTREE_NODE_ID(bn)))
+ return;
+
+ if (BTREE_NODE_LEVEL(bn) >= BTREE_MAX_DEPTH)
+ return;
+
+ rcu_read_lock();
+ struct found_btree_node n = {
+ .btree_id = BTREE_NODE_ID(bn),
+ .level = BTREE_NODE_LEVEL(bn),
+ .seq = BTREE_NODE_SEQ(bn),
+ .cookie = le64_to_cpu(bn->keys.seq),
+ .min_key = bn->min_key,
+ .max_key = bn->max_key,
+ .nr_ptrs = 1,
+ .ptrs[0].type = 1 << BCH_EXTENT_ENTRY_ptr,
+ .ptrs[0].offset = offset,
+ .ptrs[0].dev = ca->dev_idx,
+ .ptrs[0].gen = *bucket_gen(ca, sector_to_bucket(ca, offset)),
+ };
+ rcu_read_unlock();
+
+ if (bch2_trans_run(c, found_btree_node_is_readable(trans, &n))) {
+ mutex_lock(&f->lock);
+ if (BSET_BIG_ENDIAN(&bn->keys) != CPU_BIG_ENDIAN) {
+ bch_err(c, "try_read_btree_node() can't handle endian conversion");
+ f->ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (darray_push(&f->nodes, n))
+ f->ret = -ENOMEM;
+unlock:
+ mutex_unlock(&f->lock);
+ }
+}
+
+static int read_btree_nodes_worker(void *p)
+{
+ struct find_btree_nodes_worker *w = p;
+ struct bch_fs *c = container_of(w->f, struct bch_fs, found_btree_nodes);
+ struct bch_dev *ca = w->ca;
+ void *buf = (void *) __get_free_page(GFP_KERNEL);
+ struct bio *bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
+ unsigned long last_print = jiffies;
+
+ if (!buf || !bio) {
+ bch_err(c, "read_btree_nodes_worker: error allocating bio/buf");
+ w->f->ret = -ENOMEM;
+ goto err;
+ }
+
+ for (u64 bucket = ca->mi.first_bucket; bucket < ca->mi.nbuckets; bucket++)
+ for (unsigned bucket_offset = 0;
+ bucket_offset + btree_sectors(c) <= ca->mi.bucket_size;
+ bucket_offset += btree_sectors(c)) {
+ if (time_after(jiffies, last_print + HZ * 30)) {
+ u64 cur_sector = bucket * ca->mi.bucket_size + bucket_offset;
+ u64 end_sector = ca->mi.nbuckets * ca->mi.bucket_size;
+
+ bch_info(ca, "%s: %2u%% done", __func__,
+ (unsigned) div64_u64(cur_sector * 100, end_sector));
+ last_print = jiffies;
+ }
+
+ u64 sector = bucket * ca->mi.bucket_size + bucket_offset;
+
+ if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_mi_btree_bitmap &&
+ !bch2_dev_btree_bitmap_marked_sectors(ca, sector, btree_sectors(c)))
+ continue;
+
+ try_read_btree_node(w->f, ca, bio, buf, sector);
+ }
+err:
+ if (bio)
+ bio_put(bio);
+ free_page((unsigned long) buf);
+ percpu_ref_put(&ca->io_ref);
+ closure_put(w->cl);
+ kfree(w);
+ return 0;
+}
+
+static int read_btree_nodes(struct find_btree_nodes *f)
+{
+ struct bch_fs *c = container_of(f, struct bch_fs, found_btree_nodes);
+ struct closure cl;
+ int ret = 0;
+
+ closure_init_stack(&cl);
+
+ for_each_online_member(c, ca) {
+ if (!(ca->mi.data_allowed & BIT(BCH_DATA_btree)))
+ continue;
+
+ struct find_btree_nodes_worker *w = kmalloc(sizeof(*w), GFP_KERNEL);
+ struct task_struct *t;
+
+ if (!w) {
+ percpu_ref_put(&ca->io_ref);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ percpu_ref_get(&ca->io_ref);
+ closure_get(&cl);
+ w->cl = &cl;
+ w->f = f;
+ w->ca = ca;
+
+ t = kthread_run(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
+ ret = PTR_ERR_OR_ZERO(t);
+ if (ret) {
+ percpu_ref_put(&ca->io_ref);
+ closure_put(&cl);
+ f->ret = ret;
+ bch_err(c, "error starting kthread: %i", ret);
+ break;
+ }
+ }
+err:
+ closure_sync(&cl);
+ return f->ret ?: ret;
+}
+
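+/*
+ * handle_overwrites() can increase a node's min_key, breaking the sort order;
+ * bubble the node forward, insertion-sort style, to restore it:
+ */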
+static void bubble_up(struct found_btree_node *n, struct found_btree_node *end)
+{
+ while (n + 1 < end &&
+ found_btree_node_cmp_pos(n, n + 1) > 0) {
+ swap(n[0], n[1]);
+ n++;
+ }
+}
+
+static int handle_overwrites(struct bch_fs *c,
+ struct found_btree_node *start,
+ struct found_btree_node *end)
+{
+ struct found_btree_node *n;
+again:
+ for (n = start + 1;
+ n < end &&
+ n->btree_id == start->btree_id &&
+ n->level == start->level &&
+ bpos_lt(n->min_key, start->max_key);
+ n++) {
+ int cmp = found_btree_node_cmp_time(start, n);
+
+ if (cmp > 0) {
+ if (bpos_cmp(start->max_key, n->max_key) >= 0)
+ n->overwritten = true;
+ else {
+ n->range_updated = true;
+ n->min_key = bpos_successor(start->max_key);
+ bubble_up(n, end);
+ goto again;
+ }
+ } else if (cmp < 0) {
+ BUG_ON(bpos_cmp(n->min_key, start->min_key) <= 0);
+
+ start->max_key = bpos_predecessor(n->min_key);
+ start->range_updated = true;
+ } else if (n->level) {
+ n->overwritten = true;
+ } else {
+ struct printbuf buf = PRINTBUF;
+
+ prt_str(&buf, "overlapping btree nodes with same seq! halting\n ");
+ found_btree_node_to_text(&buf, c, start);
+ prt_str(&buf, "\n ");
+ found_btree_node_to_text(&buf, c, n);
+ bch_err(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ return -BCH_ERR_fsck_repair_unimplemented;
+ }
+ }
+
+ return 0;
+}
+
+int bch2_scan_for_btree_nodes(struct bch_fs *c)
+{
+ struct find_btree_nodes *f = &c->found_btree_nodes;
+ struct printbuf buf = PRINTBUF;
+ size_t dst;
+ int ret = 0;
+
+ if (f->nodes.nr)
+ return 0;
+
+ mutex_init(&f->lock);
+
+ ret = read_btree_nodes(f);
+ if (ret)
+ return ret;
+
+ if (!f->nodes.nr) {
+ bch_err(c, "%s: no btree nodes found", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (0 && c->opts.verbose) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "%s: nodes found:\n", __func__);
+ found_btree_nodes_to_text(&buf, c, f->nodes);
+ bch2_print_string_as_lines(KERN_INFO, buf.buf);
+ }
+
+ sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL);
+
+ dst = 0;
+ darray_for_each(f->nodes, i) {
+ struct found_btree_node *prev = dst ? f->nodes.data + dst - 1 : NULL;
+
+ if (prev &&
+ prev->cookie == i->cookie) {
+ if (prev->nr_ptrs == ARRAY_SIZE(prev->ptrs)) {
+ bch_err(c, "%s: found too many replicas for btree node", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ prev->ptrs[prev->nr_ptrs++] = i->ptrs[0];
+ } else {
+ f->nodes.data[dst++] = *i;
+ }
+ }
+ f->nodes.nr = dst;
+
+ sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
+
+ if (0 && c->opts.verbose) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "%s: nodes after merging replicas:\n", __func__);
+ found_btree_nodes_to_text(&buf, c, f->nodes);
+ bch2_print_string_as_lines(KERN_INFO, buf.buf);
+ }
+
+ dst = 0;
+ darray_for_each(f->nodes, i) {
+ if (i->overwritten)
+ continue;
+
+ ret = handle_overwrites(c, i, &darray_top(f->nodes));
+ if (ret)
+ goto err;
+
+ BUG_ON(i->overwritten);
+ f->nodes.data[dst++] = *i;
+ }
+ f->nodes.nr = dst;
+
+ if (c->opts.verbose) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "%s: nodes found after overwrites:\n", __func__);
+ found_btree_nodes_to_text(&buf, c, f->nodes);
+ bch2_print_string_as_lines(KERN_INFO, buf.buf);
+ }
+
+ eytzinger0_sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
+err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int found_btree_node_range_start_cmp(const void *_l, const void *_r)
+{
+ const struct found_btree_node *l = _l;
+ const struct found_btree_node *r = _r;
+
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ -cmp_int(l->level, r->level) ?:
+ bpos_cmp(l->max_key, r->min_key);
+}
+
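+/*
+ * Iterate over found nodes at the given btree/level whose key ranges overlap
+ * [_search.min_key, _search.max_key]; the nodes array is eytzinger-sorted by
+ * found_btree_node_cmp_pos():
+ */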
+#define for_each_found_btree_node_in_range(_f, _search, _idx) \
+ for (size_t _idx = eytzinger0_find_gt((_f)->nodes.data, (_f)->nodes.nr, \
+ sizeof((_f)->nodes.data[0]), \
+ found_btree_node_range_start_cmp, &_search); \
+ _idx < (_f)->nodes.nr && \
+ (_f)->nodes.data[_idx].btree_id == _search.btree_id && \
+ (_f)->nodes.data[_idx].level == _search.level && \
+ bpos_lt((_f)->nodes.data[_idx].min_key, _search.max_key); \
+ _idx = eytzinger0_next(_idx, (_f)->nodes.nr))
+
+bool bch2_btree_node_is_stale(struct bch_fs *c, struct btree *b)
+{
+ struct find_btree_nodes *f = &c->found_btree_nodes;
+
+ struct found_btree_node search = {
+ .btree_id = b->c.btree_id,
+ .level = b->c.level,
+ .min_key = b->data->min_key,
+ .max_key = b->key.k.p,
+ };
+
+ for_each_found_btree_node_in_range(f, search, idx)
+ if (f->nodes.data[idx].seq > BTREE_NODE_SEQ(b->data))
+ return true;
+ return false;
+}
+
+bool bch2_btree_has_scanned_nodes(struct bch_fs *c, enum btree_id btree)
+{
+ struct found_btree_node search = {
+ .btree_id = btree,
+ .level = 0,
+ .min_key = POS_MIN,
+ .max_key = SPOS_MAX,
+ };
+
+ for_each_found_btree_node_in_range(&c->found_btree_nodes, search, idx)
+ return true;
+ return false;
+}
+
+int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
+ unsigned level, struct bpos node_min, struct bpos node_max)
+{
+ if (btree_id_is_alloc(btree))
+ return 0;
+
+ struct find_btree_nodes *f = &c->found_btree_nodes;
+
+ int ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ if (ret)
+ return ret;
+
+ if (c->opts.verbose) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_printf(&buf, "recovering %s l=%u ", bch2_btree_id_str(btree), level);
+ bch2_bpos_to_text(&buf, node_min);
+ prt_str(&buf, " - ");
+ bch2_bpos_to_text(&buf, node_max);
+
+ bch_info(c, "%s(): %s", __func__, buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ struct found_btree_node search = {
+ .btree_id = btree,
+ .level = level,
+ .min_key = node_min,
+ .max_key = node_max,
+ };
+
+ for_each_found_btree_node_in_range(f, search, idx) {
+ struct found_btree_node n = f->nodes.data[idx];
+
+ n.range_updated |= bpos_lt(n.min_key, node_min);
+ n.min_key = bpos_max(n.min_key, node_min);
+
+ n.range_updated |= bpos_gt(n.max_key, node_max);
+ n.max_key = bpos_min(n.max_key, node_max);
+
+ struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } tmp;
+
+ found_btree_node_to_key(&tmp.k, &n);
+
+ struct printbuf buf = PRINTBUF;
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&tmp.k));
+ bch_verbose(c, "%s(): recovering %s", __func__, buf.buf);
+ printbuf_exit(&buf);
+
+ BUG_ON(bch2_bkey_invalid(c, bkey_i_to_s_c(&tmp.k), BKEY_TYPE_btree, 0, NULL));
+
+ ret = bch2_journal_key_insert(c, btree, level + 1, &tmp.k);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void bch2_find_btree_nodes_exit(struct find_btree_nodes *f)
+{
+ darray_exit(&f->nodes);
+}
diff --git a/fs/bcachefs/btree_node_scan.h b/fs/bcachefs/btree_node_scan.h
new file mode 100644
index 000000000000..08687b209787
--- /dev/null
+++ b/fs/bcachefs/btree_node_scan.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_NODE_SCAN_H
+#define _BCACHEFS_BTREE_NODE_SCAN_H
+
+int bch2_scan_for_btree_nodes(struct bch_fs *);
+bool bch2_btree_node_is_stale(struct bch_fs *, struct btree *);
+bool bch2_btree_has_scanned_nodes(struct bch_fs *, enum btree_id);
+int bch2_get_scanned_nodes(struct bch_fs *, enum btree_id, unsigned, struct bpos, struct bpos);
+void bch2_find_btree_nodes_exit(struct find_btree_nodes *);
+
+#endif /* _BCACHEFS_BTREE_NODE_SCAN_H */
diff --git a/fs/bcachefs/btree_node_scan_types.h b/fs/bcachefs/btree_node_scan_types.h
new file mode 100644
index 000000000000..abb7b27d556a
--- /dev/null
+++ b/fs/bcachefs/btree_node_scan_types.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_NODE_SCAN_TYPES_H
+#define _BCACHEFS_BTREE_NODE_SCAN_TYPES_H
+
+#include "darray.h"
+
+struct found_btree_node {
+ bool range_updated:1;
+ bool overwritten:1;
+ u8 btree_id;
+ u8 level;
+ u32 seq;
+ u64 cookie;
+
+ struct bpos min_key;
+ struct bpos max_key;
+
+ unsigned nr_ptrs;
+ struct bch_extent_ptr ptrs[BCH_REPLICAS_MAX];
+};
+
+typedef DARRAY(struct found_btree_node) found_btree_nodes;
+
+struct find_btree_nodes {
+ int ret;
+ struct mutex lock;
+ found_btree_nodes nodes;
+};
+
+#endif /* _BCACHEFS_BTREE_NODE_SCAN_TYPES_H */
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 30d69a6d133e..bbec91e8e650 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -318,7 +318,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
!(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
i->k->k.p.snapshot &&
- bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot));
+ bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot) > 0);
}
static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
@@ -397,12 +397,13 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
struct bkey_cached *ck = (void *) path->l[0].b;
unsigned new_u64s;
struct bkey_i *new_k;
+ unsigned watermark = flags & BCH_WATERMARK_MASK;
EBUG_ON(path->level);
- if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
- bch2_btree_key_cache_must_wait(c) &&
- !(flags & BCH_TRANS_COMMIT_journal_reclaim))
+ if (watermark < BCH_WATERMARK_reclaim &&
+ !test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
+ bch2_btree_key_cache_must_wait(c))
return -BCH_ERR_btree_insert_need_journal_reclaim;
/*
@@ -499,9 +500,8 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
}
static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
- struct btree_insert_entry *btree_id_start)
+ unsigned btree_id_start)
{
- struct btree_insert_entry *i;
bool trans_trigger_run;
int ret, overwrite;
@@ -514,13 +514,13 @@ static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
do {
trans_trigger_run = false;
- for (i = btree_id_start;
- i < trans->updates + trans->nr_updates && i->btree_id <= btree_id;
+ for (unsigned i = btree_id_start;
+ i < trans->nr_updates && trans->updates[i].btree_id <= btree_id;
i++) {
- if (i->btree_id != btree_id)
+ if (trans->updates[i].btree_id != btree_id)
continue;
- ret = run_one_trans_trigger(trans, i, overwrite);
+ ret = run_one_trans_trigger(trans, trans->updates + i, overwrite);
if (ret < 0)
return ret;
if (ret)
@@ -534,8 +534,7 @@ static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
{
- struct btree_insert_entry *btree_id_start = trans->updates;
- unsigned btree_id = 0;
+ unsigned btree_id = 0, btree_id_start = 0;
int ret = 0;
/*
@@ -549,8 +548,8 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
if (btree_id == BTREE_ID_alloc)
continue;
- while (btree_id_start < trans->updates + trans->nr_updates &&
- btree_id_start->btree_id < btree_id)
+ while (btree_id_start < trans->nr_updates &&
+ trans->updates[btree_id_start].btree_id < btree_id)
btree_id_start++;
ret = run_btree_triggers(trans, btree_id, btree_id_start);
@@ -558,11 +557,13 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
return ret;
}
- trans_for_each_update(trans, i) {
+ for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
+ struct btree_insert_entry *i = trans->updates + idx;
+
if (i->btree_id > BTREE_ID_alloc)
break;
if (i->btree_id == BTREE_ID_alloc) {
- ret = run_btree_triggers(trans, BTREE_ID_alloc, i);
+ ret = run_btree_triggers(trans, BTREE_ID_alloc, idx);
if (ret)
return ret;
break;
@@ -826,7 +827,8 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
struct bch_fs *c = trans->c;
int ret = 0, u64s_delta = 0;
- trans_for_each_update(trans, i) {
+ for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
+ struct btree_insert_entry *i = trans->updates + idx;
if (i->cached)
continue;
@@ -887,6 +889,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
int ret, unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
+ enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
switch (ret) {
case -BCH_ERR_btree_insert_btree_node_full:
@@ -905,7 +908,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
* flag
*/
if ((flags & BCH_TRANS_COMMIT_journal_reclaim) &&
- (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim) {
+ watermark < BCH_WATERMARK_reclaim) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
break;
}
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 9404d96c38f3..c69b233c41bb 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -321,9 +321,9 @@ struct bkey_cached {
struct btree_bkey_cached_common c;
unsigned long flags;
+ unsigned long btree_trans_barrier_seq;
u16 u64s;
bool valid;
- u32 btree_trans_barrier_seq;
struct bkey_cached_key key;
struct rhash_head hash;
@@ -364,7 +364,21 @@ struct btree_insert_entry {
unsigned long ip_allocated;
};
+/* Number of btree paths we preallocate, usually enough */
#define BTREE_ITER_INITIAL 64
+/*
+ * Limit for btree_trans_too_many_iters(); this is enough that almost all code
+ * paths should run inside this limit, and if they don't it usually indicates a
+ * bug (leaking/duplicated btree paths).
+ *
+ * exception: some fsck paths
+ *
+ * bugs with excessive path usage seem to have been eliminated now, so we
+ * might consider eliminating this (and btree_trans_too_many_iters()) at some
+ * point.
+ */
+#define BTREE_ITER_NORMAL_LIMIT 256
+/* never exceed limit */
#define BTREE_ITER_MAX (1U << 10)
struct btree_trans_commit_hook;
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index a4b40c1656a5..8e47e260eba5 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -38,6 +38,9 @@ static noinline int extent_front_merge(struct btree_trans *trans,
struct bkey_i *update;
int ret;
+ if (unlikely(trans->journal_replay_not_finished))
+ return 0;
+
update = bch2_bkey_make_mut_noupdate(trans, k);
ret = PTR_ERR_OR_ZERO(update);
if (ret)
@@ -69,6 +72,9 @@ static noinline int extent_back_merge(struct btree_trans *trans,
struct bch_fs *c = trans->c;
int ret;
+ if (unlikely(trans->journal_replay_not_finished))
+ return 0;
+
ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
if (ret < 0)
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index b2f5f2e50f7e..b4efd8cc4d1a 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -2,6 +2,7 @@
#include "bcachefs.h"
#include "alloc_foreground.h"
+#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"
@@ -18,12 +19,21 @@
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
+#include "recovery_passes.h"
#include "replicas.h"
+#include "sb-members.h"
#include "super-io.h"
#include "trace.h"
#include <linux/random.h>
+static const char * const bch2_btree_update_modes[] = {
+#define x(t) #t,
+ BTREE_UPDATE_MODES()
+#undef x
+ NULL
+};
+
static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
btree_path_idx_t, struct btree *, struct keylist *);
static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
@@ -44,56 +54,103 @@ static btree_path_idx_t get_unlocked_mut_path(struct btree_trans *trans,
return path_idx;
}
-/* Debug code: */
-
/*
* Verify that child nodes correctly span parent node's range:
*/
-static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
+int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
- struct bpos next_node = b->data->min_key;
- struct btree_node_iter iter;
+ struct bch_fs *c = trans->c;
+ struct bpos node_min = b->key.k.type == KEY_TYPE_btree_ptr_v2
+ ? bkey_i_to_btree_ptr_v2(&b->key)->v.min_key
+ : b->data->min_key;
+ struct btree_and_journal_iter iter;
struct bkey_s_c k;
- struct bkey_s_c_btree_ptr_v2 bp;
- struct bkey unpacked;
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+ struct printbuf buf = PRINTBUF;
+ struct bkey_buf prev;
+ int ret = 0;
- BUG_ON(!b->c.level);
+ BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
+ b->data->min_key));
- if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
- return;
+ if (!b->c.level)
+ return 0;
- bch2_btree_node_iter_init_from_start(&iter, b);
+ bch2_bkey_buf_init(&prev);
+ bkey_init(&prev.k->k);
+ bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
- while (1) {
- k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked);
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
if (k.k->type != KEY_TYPE_btree_ptr_v2)
- break;
- bp = bkey_s_c_to_btree_ptr_v2(k);
+ goto out;
- if (!bpos_eq(next_node, bp.v->min_key)) {
- bch2_dump_btree_node(c, b);
- bch2_bpos_to_text(&buf1, next_node);
- bch2_bpos_to_text(&buf2, bp.v->min_key);
- panic("expected next min_key %s got %s\n", buf1.buf, buf2.buf);
- }
+ struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
- bch2_btree_node_iter_advance(&iter, b);
+ struct bpos expected_min = bkey_deleted(&prev.k->k)
+ ? node_min
+ : bpos_successor(prev.k->k.p);
- if (bch2_btree_node_iter_end(&iter)) {
- if (!bpos_eq(k.k->p, b->key.k.p)) {
- bch2_dump_btree_node(c, b);
- bch2_bpos_to_text(&buf1, b->key.k.p);
- bch2_bpos_to_text(&buf2, k.k->p);
- panic("expected end %s got %s\n", buf1.buf, buf2.buf);
- }
- break;
+ if (!bpos_eq(expected_min, bp.v->min_key)) {
+ bch2_topology_error(c);
+
+ printbuf_reset(&buf);
+ prt_str(&buf, "end of prev node doesn't match start of next node\n"),
+ prt_printf(&buf, " in btree %s level %u node ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ prt_str(&buf, "\n prev ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
+ prt_str(&buf, "\n next ");
+ bch2_bkey_val_to_text(&buf, c, k);
+
+ need_fsck_err(c, btree_node_topology_bad_min_key, "%s", buf.buf);
+ goto topology_repair;
}
- next_node = bpos_successor(k.k->p);
+ bch2_bkey_buf_reassemble(&prev, c, k);
+ bch2_btree_and_journal_iter_advance(&iter);
+ }
+
+ if (bkey_deleted(&prev.k->k)) {
+ bch2_topology_error(c);
+
+ printbuf_reset(&buf);
+ prt_str(&buf, "empty interior node\n");
+ prt_printf(&buf, " in btree %s level %u node ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+ need_fsck_err(c, btree_node_topology_empty_interior_node, "%s", buf.buf);
+ goto topology_repair;
+ } else if (!bpos_eq(prev.k->k.p, b->key.k.p)) {
+ bch2_topology_error(c);
+
+ printbuf_reset(&buf);
+ prt_str(&buf, "last child node doesn't end at end of parent node\n");
+ prt_printf(&buf, " in btree %s level %u node ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ prt_str(&buf, "\n last key ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
+
+ need_fsck_err(c, btree_node_topology_bad_max_key, "%s", buf.buf);
+ goto topology_repair;
}
-#endif
+out:
+fsck_err:
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_bkey_buf_exit(&prev, c);
+ printbuf_exit(&buf);
+ return ret;
+topology_repair:
+ if ((c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
+ c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) {
+ bch2_inconsistent_error(c);
+ ret = -BCH_ERR_btree_need_topology_repair;
+ } else {
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
+ }
+ goto out;
}
/* Calculate ideal packed bkey format for new btree nodes: */
@@ -254,7 +311,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
struct open_buckets obs = { .nr = 0 };
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
- unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim
+ unsigned nr_reserve = watermark < BCH_WATERMARK_reclaim
? BTREE_NODE_RESERVE
: 0;
int ret;
@@ -549,6 +606,26 @@ static void btree_update_add_key(struct btree_update *as,
bch2_keylist_push(keys);
}
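+
+/*
+ * New btree nodes must be marked in the member info btree-allocated bitmap,
+ * and the superblock written, so that btree node scan will be able to find
+ * them:
+ */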
+static bool btree_update_new_nodes_marked_sb(struct btree_update *as)
+{
+ for_each_keylist_key(&as->new_keys, k)
+ if (!bch2_dev_btree_bitmap_marked(as->c, bkey_i_to_s_c(k)))
+ return false;
+ return true;
+}
+
+static void btree_update_new_nodes_mark_sb(struct btree_update *as)
+{
+ struct bch_fs *c = as->c;
+
+ mutex_lock(&c->sb_lock);
+ for_each_keylist_key(&as->new_keys, k)
+ bch2_dev_btree_bitmap_mark(c, bkey_i_to_s_c(k));
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+}
+
/*
* The transactional part of an interior btree node update, where we journal the
* update we did to the interior node and update alloc info:
@@ -606,6 +683,9 @@ static void btree_update_nodes_written(struct btree_update *as)
if (ret)
goto err;
+ if (!btree_update_new_nodes_marked_sb(as))
+ btree_update_new_nodes_mark_sb(as);
+
/*
* Wait for any in flight writes to finish before we free the old nodes
* on disk:
@@ -638,7 +718,7 @@ static void btree_update_nodes_written(struct btree_update *as)
* which may require allocations as well.
*/
ret = commit_do(trans, &as->disk_res, &journal_seq,
- BCH_WATERMARK_reclaim|
+ BCH_WATERMARK_interior_updates|
BCH_TRANS_COMMIT_no_enospc|
BCH_TRANS_COMMIT_no_check_rw|
BCH_TRANS_COMMIT_journal_reclaim,
@@ -648,9 +728,13 @@ static void btree_update_nodes_written(struct btree_update *as)
bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
"%s", bch2_err_str(ret));
err:
- if (as->b) {
-
- b = as->b;
+ /*
+ * We have to be careful because another thread might be getting ready
+ * to free as->b and calling btree_update_reparent() on us - we'll
+ * recheck under btree_update_lock below:
+ */
+ b = READ_ONCE(as->b);
+ if (b) {
btree_path_idx_t path_idx = get_unlocked_mut_path(trans,
as->btree_id, b->c.level, b->key.k.p);
struct btree_path *path = trans->paths + path_idx;
@@ -794,15 +878,17 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b)
{
struct bch_fs *c = as->c;
- mutex_lock(&c->btree_interior_update_lock);
- list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
-
- BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
+ BUG_ON(as->mode != BTREE_UPDATE_none);
+ BUG_ON(as->update_level_end < b->c.level);
BUG_ON(!btree_node_dirty(b));
BUG_ON(!b->c.level);
- as->mode = BTREE_INTERIOR_UPDATING_NODE;
+ mutex_lock(&c->btree_interior_update_lock);
+ list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
+
+ as->mode = BTREE_UPDATE_node;
as->b = b;
+ as->update_level_end = b->c.level;
set_btree_node_write_blocked(b);
list_add(&as->write_blocked_list, &b->write_blocked);
@@ -824,7 +910,7 @@ static void btree_update_reparent(struct btree_update *as,
lockdep_assert_held(&c->btree_interior_update_lock);
child->b = NULL;
- child->mode = BTREE_INTERIOR_UPDATING_AS;
+ child->mode = BTREE_UPDATE_update;
bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal,
bch2_update_reparent_journal_pin_flush);
@@ -835,7 +921,7 @@ static void btree_update_updated_root(struct btree_update *as, struct btree *b)
struct bkey_i *insert = &b->key;
struct bch_fs *c = as->c;
- BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
+ BUG_ON(as->mode != BTREE_UPDATE_none);
BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
ARRAY_SIZE(as->journal_entries));
@@ -849,7 +935,7 @@ static void btree_update_updated_root(struct btree_update *as, struct btree *b)
mutex_lock(&c->btree_interior_update_lock);
list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
- as->mode = BTREE_INTERIOR_UPDATING_ROOT;
+ as->mode = BTREE_UPDATE_root;
mutex_unlock(&c->btree_interior_update_lock);
}
@@ -1027,7 +1113,7 @@ static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *
struct bch_fs *c = as->c;
u64 start_time = as->start_time;
- BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);
+ BUG_ON(as->mode == BTREE_UPDATE_none);
if (as->took_gc_lock)
up_read(&as->c->gc_lock);
@@ -1044,7 +1130,7 @@ static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *
static struct btree_update *
bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
- unsigned level, bool split, unsigned flags)
+ unsigned level_start, bool split, unsigned flags)
{
struct bch_fs *c = trans->c;
struct btree_update *as;
@@ -1052,7 +1138,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
int disk_res_flags = (flags & BCH_TRANS_COMMIT_no_enospc)
? BCH_DISK_RESERVATION_NOFAIL : 0;
unsigned nr_nodes[2] = { 0, 0 };
- unsigned update_level = level;
+ unsigned level_end = level_start;
enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
int ret = 0;
u32 restart_count = trans->restart_count;
@@ -1067,34 +1153,30 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
flags &= ~BCH_WATERMARK_MASK;
flags |= watermark;
- if (watermark < c->journal.watermark) {
- struct journal_res res = { 0 };
- unsigned journal_flags = watermark|JOURNAL_RES_GET_CHECK;
-
- if ((flags & BCH_TRANS_COMMIT_journal_reclaim) &&
- watermark != BCH_WATERMARK_reclaim)
- journal_flags |= JOURNAL_RES_GET_NONBLOCK;
+ if (watermark < BCH_WATERMARK_reclaim &&
+ test_bit(JOURNAL_SPACE_LOW, &c->journal.flags)) {
+ if (flags & BCH_TRANS_COMMIT_journal_reclaim)
+ return ERR_PTR(-BCH_ERR_journal_reclaim_would_deadlock);
- ret = drop_locks_do(trans,
- bch2_journal_res_get(&c->journal, &res, 1, journal_flags));
- if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
- ret = -BCH_ERR_journal_reclaim_would_deadlock;
+ bch2_trans_unlock(trans);
+ wait_event(c->journal.wait, !test_bit(JOURNAL_SPACE_LOW, &c->journal.flags));
+ ret = bch2_trans_relock(trans);
if (ret)
return ERR_PTR(ret);
}
while (1) {
- nr_nodes[!!update_level] += 1 + split;
- update_level++;
+ nr_nodes[!!level_end] += 1 + split;
+ level_end++;
- ret = bch2_btree_path_upgrade(trans, path, update_level + 1);
+ ret = bch2_btree_path_upgrade(trans, path, level_end + 1);
if (ret)
return ERR_PTR(ret);
- if (!btree_path_node(path, update_level)) {
+ if (!btree_path_node(path, level_end)) {
/* Allocating new root? */
nr_nodes[1] += split;
- update_level = BTREE_MAX_DEPTH;
+ level_end = BTREE_MAX_DEPTH;
break;
}
@@ -1102,11 +1184,11 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
* Always check for space for two keys, even if we won't have to
* split at prior level - it might have been a merge instead:
*/
- if (bch2_btree_node_insert_fits(path->l[update_level].b,
+ if (bch2_btree_node_insert_fits(path->l[level_end].b,
BKEY_BTREE_PTR_U64s_MAX * 2))
break;
- split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
+ split = path->l[level_end].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
}
if (!down_read_trylock(&c->gc_lock)) {
@@ -1120,13 +1202,15 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS);
memset(as, 0, sizeof(*as));
closure_init(&as->cl, NULL);
- as->c = c;
- as->start_time = start_time;
- as->ip_started = _RET_IP_;
- as->mode = BTREE_INTERIOR_NO_UPDATE;
- as->took_gc_lock = true;
- as->btree_id = path->btree_id;
- as->update_level = update_level;
+ as->c = c;
+ as->start_time = start_time;
+ as->ip_started = _RET_IP_;
+ as->mode = BTREE_UPDATE_none;
+ as->watermark = watermark;
+ as->took_gc_lock = true;
+ as->btree_id = path->btree_id;
+ as->update_level_start = level_start;
+ as->update_level_end = level_end;
INIT_LIST_HEAD(&as->list);
INIT_LIST_HEAD(&as->unwritten_list);
INIT_LIST_HEAD(&as->write_blocked_list);
@@ -1168,7 +1252,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
*/
if (bch2_err_matches(ret, ENOSPC) &&
(flags & BCH_TRANS_COMMIT_journal_reclaim) &&
- watermark != BCH_WATERMARK_reclaim) {
+ watermark < BCH_WATERMARK_reclaim) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
goto err;
}
@@ -1220,23 +1304,29 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
bch2_recalc_btree_reserve(c);
}
-static void bch2_btree_set_root(struct btree_update *as,
- struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b)
+static int bch2_btree_set_root(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ bool nofail)
{
struct bch_fs *c = as->c;
- struct btree *old;
trace_and_count(c, btree_node_set_root, trans, b);
- old = btree_node_root(c, b);
+ struct btree *old = btree_node_root(c, b);
/*
* Ensure no one is using the old root while we switch to the
* new root:
*/
- bch2_btree_node_lock_write_nofail(trans, path, &old->c);
+ if (nofail) {
+ bch2_btree_node_lock_write_nofail(trans, path, &old->c);
+ } else {
+ int ret = bch2_btree_node_lock_write(trans, path, &old->c);
+ if (ret)
+ return ret;
+ }
bch2_btree_set_root_inmem(c, b);
@@ -1250,6 +1340,7 @@ static void bch2_btree_set_root(struct btree_update *as,
* depend on the new root would have to update the new root.
*/
bch2_btree_node_unlock_write(trans, path, old);
+ return 0;
}
/* Interior node updates: */
@@ -1316,12 +1407,12 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
}
static void
-__bch2_btree_insert_keys_interior(struct btree_update *as,
- struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- struct btree_node_iter node_iter,
- struct keylist *keys)
+bch2_btree_insert_keys_interior(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct btree_node_iter node_iter,
+ struct keylist *keys)
{
struct bkey_i *insert = bch2_keylist_front(keys);
struct bkey_packed *k;
@@ -1380,9 +1471,16 @@ static void __btree_split_node(struct btree_update *as,
if (bkey_deleted(k))
continue;
+ uk = bkey_unpack_key(b, k);
+
+ if (b->c.level &&
+ u64s < n1_u64s &&
+ u64s + k->u64s >= n1_u64s &&
+ bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p))
+ n1_u64s += k->u64s;
+
i = u64s >= n1_u64s;
u64s += k->u64s;
- uk = bkey_unpack_key(b, k);
if (!i)
n1_pos = uk.p;
bch2_bkey_format_add_key(&format[i], &uk);
@@ -1441,8 +1539,7 @@ static void __btree_split_node(struct btree_update *as,
bch2_verify_btree_nr_keys(n[i]);
- if (b->c.level)
- btree_node_interior_verify(as->c, n[i]);
+ BUG_ON(bch2_btree_node_check_topology(trans, n[i]));
}
}
@@ -1471,9 +1568,9 @@ static void btree_split_insert_keys(struct btree_update *as,
bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
- __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
+ bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
- btree_node_interior_verify(as->c, b);
+ BUG_ON(bch2_btree_node_check_topology(trans, b));
}
}
@@ -1488,9 +1585,14 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
u64 start_time = local_clock();
int ret = 0;
+ bch2_verify_btree_nr_keys(b);
BUG_ON(!parent && (b != btree_node_root(c, b)));
BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1));
+ ret = bch2_btree_node_check_topology(trans, b);
+ if (ret)
+ return ret;
+
bch2_btree_interior_update_will_free_node(as, b);
if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
@@ -1581,15 +1683,16 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
if (parent) {
/* Split a non root node */
ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys);
- if (ret)
- goto err;
} else if (n3) {
- bch2_btree_set_root(as, trans, trans->paths + path, n3);
+ ret = bch2_btree_set_root(as, trans, trans->paths + path, n3, false);
} else {
/* Root filled up but didn't need to be split */
- bch2_btree_set_root(as, trans, trans->paths + path, n1);
+ ret = bch2_btree_set_root(as, trans, trans->paths + path, n1, false);
}
+ if (ret)
+ goto err;
+
if (n3) {
bch2_btree_update_get_open_buckets(as, n3);
bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
@@ -1646,27 +1749,6 @@ err:
goto out;
}
-static void
-bch2_btree_insert_keys_interior(struct btree_update *as,
- struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- struct keylist *keys)
-{
- struct btree_path *linked;
- unsigned i;
-
- __bch2_btree_insert_keys_interior(as, trans, path, b,
- path->l[b->c.level].iter, keys);
-
- btree_update_updated_node(as, b);
-
- trans_for_each_path_with_node(trans, b, linked, i)
- bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
-
- bch2_trans_verify_paths(trans);
-}
-
/**
* bch2_btree_insert_node - insert bkeys into a given btree node
*
@@ -1687,7 +1769,8 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
struct keylist *keys)
{
struct bch_fs *c = as->c;
- struct btree_path *path = trans->paths + path_idx;
+ struct btree_path *path = trans->paths + path_idx, *linked;
+ unsigned i;
int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
@@ -1710,9 +1793,19 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
goto split;
}
- btree_node_interior_verify(c, b);
+ ret = bch2_btree_node_check_topology(trans, b);
+ if (ret) {
+ bch2_btree_node_unlock_write(trans, path, b);
+ return ret;
+ }
+
+ bch2_btree_insert_keys_interior(as, trans, path, b,
+ path->l[b->c.level].iter, keys);
+
+ trans_for_each_path_with_node(trans, b, linked, i)
+ bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
- bch2_btree_insert_keys_interior(as, trans, path, b, keys);
+ bch2_trans_verify_paths(trans);
live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
@@ -1726,16 +1819,17 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
bch2_maybe_compact_whiteouts(c, b))
bch2_trans_node_reinit_iter(trans, b);
+ btree_update_updated_node(as, b);
bch2_btree_node_unlock_write(trans, path, b);
- btree_node_interior_verify(c, b);
+ BUG_ON(bch2_btree_node_check_topology(trans, b));
return 0;
split:
/*
* We could attempt to avoid the transaction restart, by calling
* bch2_btree_path_upgrade() and allocating more nodes:
*/
- if (b->c.level >= as->update_level) {
+ if (b->c.level >= as->update_level_end) {
trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
}
@@ -1801,7 +1895,9 @@ static void __btree_increase_depth(struct btree_update *as, struct btree_trans *
bch2_keylist_add(&as->parent_keys, &b->key);
btree_split_insert_keys(as, trans, path_idx, n, &as->parent_keys);
- bch2_btree_set_root(as, trans, path, n);
+ int ret = bch2_btree_set_root(as, trans, path, n, true);
+ BUG_ON(ret);
+
bch2_btree_update_get_open_buckets(as, n);
bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
bch2_trans_node_add(trans, path, n);
@@ -1818,9 +1914,12 @@ int bch2_btree_increase_depth(struct btree_trans *trans, btree_path_idx_t path,
{
struct bch_fs *c = trans->c;
struct btree *b = bch2_btree_id_root(c, trans->paths[path].btree_id)->b;
+
+ if (btree_node_fake(b))
+ return bch2_btree_split_leaf(trans, path, flags);
+
struct btree_update *as =
- bch2_btree_update_start(trans, trans->paths + path,
- b->c.level, true, flags);
+ bch2_btree_update_start(trans, trans->paths + path, b->c.level, true, flags);
if (IS_ERR(as))
return PTR_ERR(as);
@@ -1851,6 +1950,22 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
BUG_ON(!trans->paths[path].should_be_locked);
BUG_ON(!btree_node_locked(&trans->paths[path], level));
+ /*
+ * Work around a deadlock caused by the btree write buffer not doing
+ * merges and leaving tons of merges for us to do - we really don't need
+ * to be doing merges at all from the interior update path, and if the
+ * interior update path is generating too many new interior updates we
+ * deadlock:
+ */
+ if ((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_interior_updates)
+ return 0;
+
+ if ((flags & BCH_WATERMARK_MASK) <= BCH_WATERMARK_reclaim) {
+ flags &= ~BCH_WATERMARK_MASK;
+ flags |= BCH_WATERMARK_btree;
+ flags |= BCH_TRANS_COMMIT_journal_reclaim;
+ }
+
b = trans->paths[path].l[level].b;
if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
@@ -1996,6 +2111,10 @@ err:
bch2_path_put(trans, new_path, true);
bch2_path_put(trans, sib_path, true);
bch2_trans_verify_locks(trans);
+ if (ret == -BCH_ERR_journal_reclaim_would_deadlock)
+ ret = 0;
+ if (!ret)
+ ret = bch2_trans_relock(trans);
return ret;
err_free_update:
bch2_btree_node_free_never_used(as, trans, n);
@@ -2041,12 +2160,13 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
if (parent) {
bch2_keylist_add(&as->parent_keys, &n->key);
ret = bch2_btree_insert_node(as, trans, iter->path, parent, &as->parent_keys);
- if (ret)
- goto err;
} else {
- bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n);
+ ret = bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n, false);
}
+ if (ret)
+ goto err;
+
bch2_btree_update_get_open_buckets(as, n);
bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
@@ -2391,7 +2511,7 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
bch2_btree_set_root_inmem(c, b);
}
-static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
+static int __bch2_btree_root_alloc_fake(struct btree_trans *trans, enum btree_id id, unsigned level)
{
struct bch_fs *c = trans->c;
struct closure cl;
@@ -2410,7 +2530,7 @@ static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
set_btree_node_fake(b);
set_btree_node_need_rewrite(b);
- b->c.level = 0;
+ b->c.level = level;
b->c.btree_id = id;
bkey_btree_ptr_init(&b->key);
@@ -2437,9 +2557,23 @@ static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
return 0;
}
-void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level)
+{
+ bch2_trans_run(c, __bch2_btree_root_alloc_fake(trans, id, level));
+}
+
+static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
{
- bch2_trans_run(c, __bch2_btree_root_alloc(trans, id));
+ prt_printf(out, "%ps: btree=%s l=%u-%u watermark=%s mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
+ (void *) as->ip_started,
+ bch2_btree_id_str(as->btree_id),
+ as->update_level_start,
+ as->update_level_end,
+ bch2_watermarks[as->watermark],
+ bch2_btree_update_modes[as->mode],
+ as->nodes_written,
+ closure_nr_remaining(&as->cl),
+ as->journal.seq);
}
void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
@@ -2448,12 +2582,7 @@ void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
mutex_lock(&c->btree_interior_update_lock);
list_for_each_entry(as, &c->btree_interior_update_list, list)
- prt_printf(out, "%ps: mode=%u nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
- (void *) as->ip_started,
- as->mode,
- as->nodes_written,
- closure_nr_remaining(&as->cl),
- as->journal.seq);
+ bch2_btree_update_to_text(out, as);
mutex_unlock(&c->btree_interior_update_lock);
}
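
For reference, a line emitted by the new bch2_btree_update_to_text() looks roughly like this (hypothetical values, following the format string above):

bch2_btree_split_leaf+0x1a2/0x4f0: btree=extents l=0-1 watermark=btree mode=node nodes_written=0 cl.remaining=2 journal_seq=8241
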
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index f651dd48aaa0..c1a479ebaad1 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -10,6 +10,20 @@
#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
+int bch2_btree_node_check_topology(struct btree_trans *, struct btree *);
+
+#define BTREE_UPDATE_MODES() \
+ x(none) \
+ x(node) \
+ x(root) \
+ x(update)
+
+enum btree_update_mode {
+#define x(n) BTREE_UPDATE_##n,
+ BTREE_UPDATE_MODES()
+#undef x
+};
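/* Sketch, not part of this hunk: the same x-macro list can generate the
 * bch2_btree_update_modes[] name table used by the to_text code, keeping
 * the enum and the strings in sync (the real table is assumed to live in
 * the .c file, which this diff doesn't show):
 */
const char * const bch2_btree_update_modes[] = {
#define x(n)	#n,
	BTREE_UPDATE_MODES()
#undef x
	NULL
};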
+
/*
* Tracks an in progress split/rewrite of a btree node and the update to the
* parent node:
@@ -37,24 +51,19 @@ struct btree_update {
struct list_head list;
struct list_head unwritten_list;
- /* What kind of update are we doing? */
- enum {
- BTREE_INTERIOR_NO_UPDATE,
- BTREE_INTERIOR_UPDATING_NODE,
- BTREE_INTERIOR_UPDATING_ROOT,
- BTREE_INTERIOR_UPDATING_AS,
- } mode;
-
+ enum btree_update_mode mode;
+ enum bch_watermark watermark;
unsigned nodes_written:1;
unsigned took_gc_lock:1;
enum btree_id btree_id;
- unsigned update_level;
+ unsigned update_level_start;
+ unsigned update_level_end;
struct disk_reservation disk_res;
/*
- * BTREE_INTERIOR_UPDATING_NODE:
+ * BTREE_UPDATE_node:
* The update that made the new nodes visible was a regular update to an
* existing interior node - @b. We can't write out the update to @b
* until the new nodes we created are finished writing, so we block @b
@@ -163,7 +172,7 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
struct bkey_i *, unsigned, bool);
void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
-void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
+void bch2_btree_root_alloc_fake(struct bch_fs *, enum btree_id, unsigned);
static inline unsigned btree_update_reserve_required(struct bch_fs *c,
struct btree *b)
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 5cbad8445782..36a6f42aba5e 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -11,6 +11,7 @@
#include "journal_reclaim.h"
#include <linux/prefetch.h>
+#include <linux/sort.h>
static int bch2_btree_write_buffer_journal_flush(struct journal *,
struct journal_entry_pin *, u64);
@@ -46,6 +47,14 @@ static inline bool wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_ke
#endif
}
+static int wb_key_seq_cmp(const void *_l, const void *_r)
+{
+ const struct btree_write_buffered_key *l = _l;
+ const struct btree_write_buffered_key *r = _r;
+
+ return cmp_int(l->journal_seq, r->journal_seq);
+}
+
/* Compare excluding idx, the low 24 bits: */
static inline bool wb_key_eq(const void *_l, const void *_r)
{
@@ -307,6 +316,16 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
bch2_btree_node_unlock_write(trans, path, path->l[0].b);
write_locked = false;
+
+ ret = lockrestart_do(trans,
+ bch2_btree_iter_traverse(&iter) ?:
+ bch2_foreground_maybe_merge(trans, iter.path, 0,
+ BCH_WATERMARK_reclaim|
+ BCH_TRANS_COMMIT_journal_reclaim|
+ BCH_TRANS_COMMIT_no_check_rw|
+ BCH_TRANS_COMMIT_no_enospc));
+ if (ret)
+ goto err;
}
}
@@ -357,6 +376,11 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
*/
trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
+ sort(wb->flushing.keys.data,
+ wb->flushing.keys.nr,
+ sizeof(wb->flushing.keys.data[0]),
+ wb_key_seq_cmp, NULL);
+
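/* Aside (sketch, hypothetical struct): cmp_int() expands to
 * ((l > r) - (l < r)), avoiding the classic "return a - b" comparator bug,
 * which truncates a 64-bit difference to int and can flip the sign:
 */
struct seq_item { u64 seq; };

static int seq_item_cmp(const void *_l, const void *_r)
{
	const struct seq_item *l = _l, *r = _r;

	return cmp_int(l->seq, r->seq);		/* never overflows */
}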
darray_for_each(wb->flushing.keys, i) {
if (!i->journal_seq)
continue;
@@ -368,10 +392,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
ret = commit_do(trans, NULL, NULL,
BCH_WATERMARK_reclaim|
+ BCH_TRANS_COMMIT_journal_reclaim|
BCH_TRANS_COMMIT_no_check_rw|
BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_no_journal_res|
- BCH_TRANS_COMMIT_journal_reclaim,
+ BCH_TRANS_COMMIT_no_journal_res,
btree_write_buffered_insert(trans, i));
if (ret)
goto err;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 96edf2c34d43..941401a210f5 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -525,6 +525,7 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
bch2_data_type_str(data_type))) {
+ BUG();
ret = -EIO;
goto err;
}
@@ -628,6 +629,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
bch2_data_type_str(ptr_data_type),
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ BUG();
ret = -EIO;
goto err;
}
@@ -815,14 +817,14 @@ static int __mark_pointer(struct btree_trans *trans,
static int bch2_trigger_pointer(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
- s64 *sectors,
- unsigned flags)
+ const union bch_extent_entry *entry,
+ s64 *sectors, unsigned flags)
{
bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
struct bpos bucket;
struct bch_backpointer bp;
- bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
+ bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, entry, &bucket, &bp);
*sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);
if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
@@ -851,7 +853,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (flags & BTREE_TRIGGER_GC) {
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
percpu_down_read(&c->mark_lock);
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
@@ -979,7 +981,7 @@ static int __trigger_extent(struct btree_trans *trans,
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
s64 disk_sectors;
- ret = bch2_trigger_pointer(trans, btree_id, level, k, p, &disk_sectors, flags);
+ ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
if (ret < 0)
return ret;
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 6387e039f789..f9af5adabe83 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -226,6 +226,7 @@ static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_waterma
fallthrough;
case BCH_WATERMARK_btree_copygc:
case BCH_WATERMARK_reclaim:
+ case BCH_WATERMARK_interior_updates:
break;
}
@@ -394,14 +395,6 @@ static inline const char *bch2_data_type_str(enum bch_data_type type)
: "(invalid data type)";
}
-static inline void bch2_prt_data_type(struct printbuf *out, enum bch_data_type type)
-{
- if (type < BCH_DATA_NR)
- prt_str(out, __bch2_data_types[type]);
- else
- prt_printf(out, "(invalid data type %u)", type);
-}
-
/* disk reservations: */
static inline void bch2_disk_reservation_put(struct bch_fs *c,
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 38defa19d52d..4d14f19f5185 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -7,7 +7,7 @@
#include "chardev.h"
#include "journal.h"
#include "move.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
@@ -134,42 +134,38 @@ static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg
struct fsck_thread {
struct thread_with_stdio thr;
struct bch_fs *c;
- char **devs;
- size_t nr_devs;
struct bch_opts opts;
};
static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr)
{
struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr);
- if (thr->devs)
- for (size_t i = 0; i < thr->nr_devs; i++)
- kfree(thr->devs[i]);
- kfree(thr->devs);
kfree(thr);
}
static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
{
struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
- struct bch_fs *c = bch2_fs_open(thr->devs, thr->nr_devs, thr->opts);
-
- if (IS_ERR(c))
- return PTR_ERR(c);
+ struct bch_fs *c = thr->c;
- int ret = 0;
- if (test_bit(BCH_FS_errors_fixed, &c->flags))
- ret |= 1;
- if (test_bit(BCH_FS_error, &c->flags))
- ret |= 4;
+ int ret = PTR_ERR_OR_ZERO(c);
+ if (ret)
+ return ret;
- bch2_fs_stop(c);
+ ret = bch2_fs_start(thr->c);
+ if (ret)
+ goto err;
- if (ret & 1)
+ if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name);
- if (ret & 4)
+ ret |= 1;
+ }
+ if (test_bit(BCH_FS_error, &c->flags)) {
bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name);
-
+ ret |= 4;
+ }
+err:
+ bch2_fs_stop(c);
return ret;
}
@@ -182,7 +178,7 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
{
struct bch_ioctl_fsck_offline arg;
struct fsck_thread *thr = NULL;
- u64 *devs = NULL;
+ darray_str(devs) = {};
long ret = 0;
if (copy_from_user(&arg, user_arg, sizeof(arg)))
@@ -194,29 +190,32 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!(devs = kcalloc(arg.nr_devs, sizeof(*devs), GFP_KERNEL)) ||
- !(thr = kzalloc(sizeof(*thr), GFP_KERNEL)) ||
- !(thr->devs = kcalloc(arg.nr_devs, sizeof(*thr->devs), GFP_KERNEL))) {
- ret = -ENOMEM;
- goto err;
- }
+ for (size_t i = 0; i < arg.nr_devs; i++) {
+ u64 dev_u64;
+ ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64));
+ if (ret)
+ goto err;
- thr->opts = bch2_opts_empty();
- thr->nr_devs = arg.nr_devs;
+ char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX);
+ ret = PTR_ERR_OR_ZERO(dev_str);
+ if (ret)
+ goto err;
- if (copy_from_user(devs, &user_arg->devs[0],
- array_size(sizeof(user_arg->devs[0]), arg.nr_devs))) {
- ret = -EINVAL;
- goto err;
+ ret = darray_push(&devs, dev_str);
+ if (ret) {
+ kfree(dev_str);
+ goto err;
+ }
}
- for (size_t i = 0; i < arg.nr_devs; i++) {
- thr->devs[i] = strndup_user((char __user *)(unsigned long) devs[i], PATH_MAX);
- ret = PTR_ERR_OR_ZERO(thr->devs[i]);
- if (ret)
- goto err;
+ thr = kzalloc(sizeof(*thr), GFP_KERNEL);
+ if (!thr) {
+ ret = -ENOMEM;
+ goto err;
}
+ thr->opts = bch2_opts_empty();
+
if (arg.opts) {
char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
@@ -230,15 +229,28 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio);
- ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_offline_fsck_ops);
-err:
- if (ret < 0) {
- if (thr)
- bch2_fsck_thread_exit(&thr->thr);
- pr_err("ret %s", bch2_err_str(ret));
- }
- kfree(devs);
+ /* We need request_key() to be called before we punt to kthread: */
+ opt_set(thr->opts, nostart, true);
+
+ bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);
+
+ thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts);
+
+ if (!IS_ERR(thr->c) &&
+ thr->c->opts.errors == BCH_ON_ERROR_panic)
+ thr->c->opts.errors = BCH_ON_ERROR_ro;
+
+ ret = __bch2_run_thread_with_stdio(&thr->thr);
+out:
+ darray_for_each(devs, i)
+ kfree(*i);
+ darray_exit(&devs);
return ret;
+err:
+ if (thr)
+ bch2_fsck_thread_exit(&thr->thr);
+ pr_err("ret %s", bch2_err_str(ret));
+ goto out;
}
static long bch2_global_ioctl(unsigned cmd, void __user *arg)
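
The darray conversion above follows one ownership rule: a string either gets pushed into the darray, which then owns it, or is freed immediately. Distilled (sketch, hypothetical names):

static int collect_one_path(void)
{
	darray_str(paths) = {};
	char *s = kstrdup("example", GFP_KERNEL);
	int ret = s ? darray_push(&paths, s) : -ENOMEM;

	if (ret)
		kfree(s);	/* push failed: still ours to free */
	darray_for_each(paths, i)
		kfree(*i);	/* on success the darray owned it */
	darray_exit(&paths);
	return ret;
}
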
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 4701457f6381..7ed779b411f6 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -429,15 +429,20 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
extent_nonce(version, crc_old), bio);
if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
- bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
- "expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
- __func__,
- crc_old.csum.hi,
- crc_old.csum.lo,
- merged.hi,
- merged.lo,
- bch2_csum_types[crc_old.csum_type],
- bch2_csum_types[new_csum_type]);
+ struct printbuf buf = PRINTBUF;
+ prt_printf(&buf, "checksum error in %s() (memory corruption or bug?)\n"
+ "expected %0llx:%0llx got %0llx:%0llx (old type ",
+ __func__,
+ crc_old.csum.hi,
+ crc_old.csum.lo,
+ merged.hi,
+ merged.lo);
+ bch2_prt_csum_type(&buf, crc_old.csum_type);
+ prt_str(&buf, " new type ");
+ bch2_prt_csum_type(&buf, new_csum_type);
+ prt_str(&buf, ")");
+ bch_err(c, "%s", buf.buf);
+ printbuf_exit(&buf);
return -EIO;
}
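
The conversion above is the standard printbuf lifecycle, distilled (sketch; c and crc_old are assumed from the surrounding function):

struct printbuf buf = PRINTBUF;		/* stack struct; buffer allocated lazily */

prt_str(&buf, "old type ");
bch2_prt_csum_type(&buf, crc_old.csum_type);	/* bounds-checked type name */
bch_err(c, "%s", buf.buf);
printbuf_exit(&buf);			/* frees the heap buffer */
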
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 1b8c2c1016dc..e40499fde9a4 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -61,11 +61,12 @@ static inline void bch2_csum_err_msg(struct printbuf *out,
struct bch_csum expected,
struct bch_csum got)
{
- prt_printf(out, "checksum error: got ");
+ prt_str(out, "checksum error, type ");
+ bch2_prt_csum_type(out, type);
+ prt_str(out, ": got ");
bch2_csum_to_text(out, type, got);
prt_str(out, " should be ");
bch2_csum_to_text(out, type, expected);
- prt_printf(out, " type %s", bch2_csum_types[type]);
}
int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
diff --git a/fs/bcachefs/compress.h b/fs/bcachefs/compress.h
index 58c2eb45570f..607fd5e232c9 100644
--- a/fs/bcachefs/compress.h
+++ b/fs/bcachefs/compress.h
@@ -47,14 +47,6 @@ static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
}
-static inline void bch2_prt_compression_type(struct printbuf *out, enum bch_compression_type type)
-{
- if (type < BCH_COMPRESSION_TYPE_NR)
- prt_str(out, __bch2_compression_types[type]);
- else
- prt_printf(out, "(invalid compression type %u)", type);
-}
-
int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *,
struct bch_extent_crc_unpacked *);
int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 4150feca42a2..0022b51ce3c0 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -14,6 +14,7 @@
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
+#include "snapshot.h"
#include "subvolume.h"
#include "trace.h"
@@ -509,6 +510,14 @@ int bch2_data_update_init(struct btree_trans *trans,
unsigned ptrs_locked = 0;
int ret = 0;
+ /*
+ * If the fs is corrupt we may have a key for a snapshot node that doesn't
+ * exist; we have to check for this because we go rw before repairing the
+ * snapshots table - just skip it, we can move it later.
+ */
+ if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
+ return -BCH_ERR_data_update_done;
+
bch2_bkey_buf_init(&m->k);
bch2_bkey_buf_reassemble(&m->k, c, k);
m->btree_id = btree_id;
@@ -571,8 +580,7 @@ int bch2_data_update_init(struct btree_trans *trans,
move_ctxt_wait_event(ctxt,
(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
PTR_BUCKET_POS(c, &p.ptr), 0)) ||
- (!atomic_read(&ctxt->read_sectors) &&
- !atomic_read(&ctxt->write_sectors)));
+ list_empty(&ctxt->ios));
if (!locked)
bch2_bucket_nocow_lock(&c->nocow_locks,
@@ -590,6 +598,8 @@ int bch2_data_update_init(struct btree_trans *trans,
i++;
}
+ unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));
+
/*
* If current extent durability is less than io_opts.data_replicas,
* we're not trying to rereplicate the extent up to data_replicas here -
@@ -599,7 +609,7 @@ int bch2_data_update_init(struct btree_trans *trans,
* rereplicate, currently, so that users don't get an unexpected -ENOSPC
*/
if (!(m->data_opts.write_flags & BCH_WRITE_CACHED) &&
- durability_have >= io_opts.data_replicas) {
+ !durability_required) {
m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
m->data_opts.rewrite_ptrs = 0;
/* if iter == NULL, it's just a promote */
@@ -608,11 +618,18 @@ int bch2_data_update_init(struct btree_trans *trans,
goto done;
}
- m->op.nr_replicas = min(durability_removing, io_opts.data_replicas - durability_have) +
+ m->op.nr_replicas = min(durability_removing, durability_required) +
m->data_opts.extra_replicas;
- m->op.nr_replicas_required = m->op.nr_replicas;
- BUG_ON(!m->op.nr_replicas);
+ /*
+ * If device(s) were set to durability=0 after data was written to them
+ * we can end up with a durability=0 extent, and the normal algorithm
+ * that tries not to increase durability doesn't work:
+ */
+ if (!(durability_have + durability_removing))
+ m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);
+
+ m->op.nr_replicas_required = m->op.nr_replicas;
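/* Worked example of the replica arithmetic above (hypothetical numbers):
 * io_opts.data_replicas = 2, durability_have = 1, durability_removing = 2
 * -> durability_required = max(0, 2 - 1) = 1
 * -> nr_replicas = min(durability_removing, durability_required)
 *                  + extra_replicas = min(2, 1) + 0 = 1
 * And if durability_have + durability_removing were both 0 (all pointers on
 * durability=0 devices), nr_replicas is bumped to at least 1 so the extent
 * still gets written somewhere.
 */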
if (reserve_sectors) {
ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 208ce6f0fc43..cd99b7399414 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -13,6 +13,7 @@
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
+#include "btree_update_interior.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
@@ -668,7 +669,7 @@ static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
i->size = size;
i->ret = 0;
- do {
+ while (1) {
err = flush_buf(i);
if (err)
return err;
@@ -676,9 +677,12 @@ static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
if (!i->size)
break;
+ if (done)
+ break;
+
done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter);
i->iter++;
- } while (!done);
+ }
if (i->buf.allocation_failure)
return -ENOMEM;
@@ -693,13 +697,45 @@ static const struct file_operations journal_pins_ops = {
.read = bch2_journal_pins_read,
};
+static ssize_t bch2_btree_updates_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ int err;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ if (!i->iter) {
+ bch2_btree_updates_to_text(&i->buf, c);
+ i->iter++;
+ }
+
+ err = flush_buf(i);
+ if (err)
+ return err;
+
+ if (i->buf.allocation_failure)
+ return -ENOMEM;
+
+ return i->ret;
+}
+
+static const struct file_operations btree_updates_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_btree_updates_read,
+};
+
static int btree_transaction_stats_open(struct inode *inode, struct file *file)
{
struct bch_fs *c = inode->i_private;
struct dump_iter *i;
i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
-
if (!i)
return -ENOMEM;
@@ -866,6 +902,20 @@ void bch2_fs_debug_exit(struct bch_fs *c)
debugfs_remove_recursive(c->fs_debug_dir);
}
+static void bch2_fs_debug_btree_init(struct bch_fs *c, struct btree_debug *bd)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir(bch2_btree_id_str(bd->id), c->btree_debug_dir);
+
+ debugfs_create_file("keys", 0400, d, bd, &btree_debug_ops);
+
+ debugfs_create_file("formats", 0400, d, bd, &btree_format_debug_ops);
+
+ debugfs_create_file("bfloat-failed", 0400, d, bd,
+ &bfloat_failed_debug_ops);
+}
+
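/* With this helper each btree gets its own directory; assuming debugfs is
 * mounted at /sys/kernel/debug and btree_debug_dir keeps its current name,
 * the layout becomes (hypothetical fs uuid):
 *
 *   .../bcachefs/<uuid>/btrees/extents/keys
 *   .../bcachefs/<uuid>/btrees/extents/formats
 *   .../bcachefs/<uuid>/btrees/extents/bfloat-failed
 */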
void bch2_fs_debug_init(struct bch_fs *c)
{
struct btree_debug *bd;
@@ -888,6 +938,9 @@ void bch2_fs_debug_init(struct bch_fs *c)
debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
c->btree_debug, &journal_pins_ops);
+ debugfs_create_file("btree_updates", 0400, c->fs_debug_dir,
+ c->btree_debug, &btree_updates_ops);
+
debugfs_create_file("btree_transaction_stats", 0400, c->fs_debug_dir,
c, &btree_transaction_stats_op);
@@ -902,21 +955,7 @@ void bch2_fs_debug_init(struct bch_fs *c)
bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
bd++) {
bd->id = bd - c->btree_debug;
- debugfs_create_file(bch2_btree_id_str(bd->id),
- 0400, c->btree_debug_dir, bd,
- &btree_debug_ops);
-
- snprintf(name, sizeof(name), "%s-formats",
- bch2_btree_id_str(bd->id));
-
- debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
- &btree_format_debug_ops);
-
- snprintf(name, sizeof(name), "%s-bfloat-failed",
- bch2_btree_id_str(bd->id));
-
- debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
- &bfloat_failed_debug_ops);
+ bch2_fs_debug_btree_init(c, bd);
}
}
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 082075244e16..556a217108d3 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -131,29 +131,33 @@ fsck_err:
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
- const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
- unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
+ const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
+ struct bch_stripe s = {};
+
+ memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));
+
+ unsigned nr_data = s.nr_blocks - s.nr_redundant;
+
+ prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
+ s.algorithm,
+ le16_to_cpu(s.sectors),
+ nr_data,
+ s.nr_redundant);
+ bch2_prt_csum_type(out, s.csum_type);
+ prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);
+
+ for (unsigned i = 0; i < s.nr_blocks; i++) {
+ const struct bch_extent_ptr *ptr = sp->ptrs + i;
+
+ if ((void *) ptr >= bkey_val_end(k))
+ break;
+
+ bch2_extent_ptr_to_text(out, c, ptr);
- prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
- s->algorithm,
- le16_to_cpu(s->sectors),
- nr_data,
- s->nr_redundant,
- s->csum_type,
- 1U << s->csum_granularity_bits);
-
- for (i = 0; i < s->nr_blocks; i++) {
- const struct bch_extent_ptr *ptr = s->ptrs + i;
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- u32 offset;
- u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
-
- prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
- if (i < nr_data)
- prt_printf(out, "#%u", stripe_blockcount_get(s, i));
- prt_printf(out, " gen %u", ptr->gen);
- if (ptr_stale(ca, ptr))
- prt_printf(out, " stale");
+ if (s.csum_type < BCH_CSUM_NR &&
+ i < nr_data &&
+ stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
+ prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
}
}
@@ -607,10 +611,8 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
struct printbuf err = PRINTBUF;
struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);
- prt_printf(&err, "stripe checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)\n",
- want.hi, want.lo,
- got.hi, got.lo,
- bch2_csum_types[v->csum_type]);
+ prt_str(&err, "stripe ");
+ bch2_csum_err_msg(&err, v->csum_type, want, got);
prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
bch_err_ratelimited(ca, "%s", err.buf);
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index f4369b02e805..f042616888b0 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -32,6 +32,8 @@ static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
unsigned dev, unsigned csum_idx)
{
+ EBUG_ON(s->csum_type >= BCH_CSUM_NR);
+
unsigned csum_bytes = bch_crc_bytes[s->csum_type];
return sizeof(struct bch_stripe) +
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index af25d8ec60f2..01a79fa3eacb 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -252,7 +252,8 @@
x(BCH_ERR_nopromote, nopromote_in_flight) \
x(BCH_ERR_nopromote, nopromote_no_writes) \
x(BCH_ERR_nopromote, nopromote_enomem) \
- x(0, need_inode_lock)
+ x(0, need_inode_lock) \
+ x(0, invalid_snapshot_node)
enum bch_errcode {
BCH_ERR_START = 2048,
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 043431206799..82a6656c941c 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "error.h"
-#include "recovery.h"
+#include "journal.h"
+#include "recovery_passes.h"
#include "super.h"
#include "thread_with_file.h"
@@ -16,7 +17,8 @@ bool bch2_inconsistent_error(struct bch_fs *c)
return false;
case BCH_ON_ERROR_ro:
if (bch2_fs_emergency_read_only(c))
- bch_err(c, "inconsistency detected - emergency read only");
+ bch_err(c, "inconsistency detected - emergency read only at journal seq %llu",
+ journal_cur_seq(&c->journal));
return true;
case BCH_ON_ERROR_panic:
panic(bch2_fmt(c, "panic after error"));
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index ae1d6674c512..36caedf72d89 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -32,6 +32,12 @@ bool bch2_inconsistent_error(struct bch_fs *);
int bch2_topology_error(struct bch_fs *);
+#define bch2_fs_topology_error(c, ...) \
+({ \
+ bch_err(c, "btree topology error: " __VA_ARGS__); \
+ bch2_topology_error(c); \
+})
+
#define bch2_fs_inconsistent(c, ...) \
({ \
bch_err(c, __VA_ARGS__); \
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 61395b113df9..1a331e539204 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -189,13 +189,18 @@ int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
+ struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
int ret = 0;
- bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, c, err,
- btree_ptr_v2_val_too_big,
+ bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
+ c, err, btree_ptr_v2_val_too_big,
"value too big (%zu > %zu)",
bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
+ bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
+ c, err, btree_ptr_v2_min_key_bad,
+ "min_key > key");
+
ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
return ret;
@@ -973,6 +978,33 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
return bkey_deleted(k.k);
}
+void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
+{
+ struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
+ ? bch_dev_bkey_exists(c, ptr->dev)
+ : NULL;
+
+ if (!ca) {
+ prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
+ (u64) ptr->offset, ptr->gen,
+ ptr->cached ? " cached" : "");
+ } else {
+ u32 offset;
+ u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
+
+ prt_printf(out, "ptr: %u:%llu:%u gen %u",
+ ptr->dev, b, offset, ptr->gen);
+ if (ptr->cached)
+ prt_str(out, " cached");
+ if (ptr->unwritten)
+ prt_str(out, " unwritten");
+ if (b >= ca->mi.first_bucket &&
+ b < ca->mi.nbuckets &&
+ ptr_stale(ca, ptr))
+ prt_printf(out, " stale");
+ }
+}
+
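/* For reference, bch2_extent_ptr_to_text() output looks roughly like
 * (hypothetical numbers):
 *
 *   ptr: 0:1523:48 gen 3 cached     <- device known: dev:bucket:offset
 *   ptr: 2:7340032 gen 0            <- device unknown: dev:sector
 */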
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
@@ -988,42 +1020,22 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
prt_printf(out, " ");
switch (__extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr: {
- const struct bch_extent_ptr *ptr = entry_to_ptr(entry);
- struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
- ? bch_dev_bkey_exists(c, ptr->dev)
- : NULL;
-
- if (!ca) {
- prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
- (u64) ptr->offset, ptr->gen,
- ptr->cached ? " cached" : "");
- } else {
- u32 offset;
- u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
-
- prt_printf(out, "ptr: %u:%llu:%u gen %u",
- ptr->dev, b, offset, ptr->gen);
- if (ptr->cached)
- prt_str(out, " cached");
- if (ptr->unwritten)
- prt_str(out, " unwritten");
- if (ca && ptr_stale(ca, ptr))
- prt_printf(out, " stale");
- }
+ case BCH_EXTENT_ENTRY_ptr:
+ bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry));
break;
- }
+
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128: {
struct bch_extent_crc_unpacked crc =
bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
- prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress ",
+ prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ",
crc.compressed_size,
crc.uncompressed_size,
- crc.offset, crc.nonce,
- bch2_csum_types[crc.csum_type]);
+ crc.offset, crc.nonce);
+ bch2_prt_csum_type(out, crc.csum_type);
+ prt_str(out, " compress ");
bch2_prt_compression_type(out, crc.compression_type);
break;
}
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index fd2669cdd76f..528e817eacbd 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -596,30 +596,6 @@ static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
return ret;
}
-static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
-{
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_btree_ptr_v2:
- return BCH_DATA_btree;
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v:
- return BCH_DATA_user;
- case KEY_TYPE_stripe: {
- struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
-
- BUG_ON(ptr < s.v->ptrs ||
- ptr >= s.v->ptrs + s.v->nr_blocks);
-
- return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
- ? BCH_DATA_parity
- : BCH_DATA_user;
- }
- default:
- BUG();
- }
-}
-
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
@@ -700,6 +676,7 @@ bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);
void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
+void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct bch_extent_ptr *);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
int bch2_bkey_ptrs_invalid(struct bch_fs *, struct bkey_s_c,
diff --git a/fs/bcachefs/eytzinger.c b/fs/bcachefs/eytzinger.c
new file mode 100644
index 000000000000..0f955c3c76a7
--- /dev/null
+++ b/fs/bcachefs/eytzinger.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "eytzinger.h"
+
+/**
+ * is_aligned - is this pointer & size okay for word-wide copying?
+ * @base: pointer to data
+ * @size: size of each element
+ * @align: required alignment (typically 4 or 8)
+ *
+ * Returns true if elements can be copied using word loads and stores.
+ * The size must be a multiple of the alignment, and the base address must
+ * be aligned as well if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
+ *
+ * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
+ * to "if ((a | b) & mask)", so we do that by hand.
+ */
+__attribute_const__ __always_inline
+static bool is_aligned(const void *base, size_t size, unsigned char align)
+{
+ unsigned char lsbits = (unsigned char)size;
+
+ (void)base;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ lsbits |= (unsigned char)(uintptr_t)base;
+#endif
+ return (lsbits & (align - 1)) == 0;
+}
+
+/**
+ * swap_words_32 - swap two elements in 32-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 4)
+ *
+ * Exchange the two objects in memory. This exploits base+index addressing,
+ * which basically all CPUs have, to minimize loop overhead computations.
+ *
+ * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
+ * bottom of the loop, even though the zero flag is still valid from the
+ * subtract (since the intervening mov instructions don't alter the flags).
+ * Gcc 8.1.0 doesn't have that problem.
+ */
+static void swap_words_32(void *a, void *b, size_t n)
+{
+ do {
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+ } while (n);
+}
+
+/**
+ * swap_words_64 - swap two elements in 64-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 8)
+ *
+ * Exchange the two objects in memory. This exploits base+index
+ * addressing, which basically all CPUs have, to minimize loop overhead
+ * computations.
+ *
+ * We'd like to use 64-bit loads if possible. If they're not, emulating
+ * one requires base+index+4 addressing which x86 has but most other
+ * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
+ * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
+ * x32 ABI). Are there any cases the kernel needs to worry about?
+ */
+static void swap_words_64(void *a, void *b, size_t n)
+{
+ do {
+#ifdef CONFIG_64BIT
+ u64 t = *(u64 *)(a + (n -= 8));
+ *(u64 *)(a + n) = *(u64 *)(b + n);
+ *(u64 *)(b + n) = t;
+#else
+ /* Use two 32-bit transfers to avoid base+index+4 addressing */
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+
+ t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+#endif
+ } while (n);
+}
+
+/**
+ * swap_bytes - swap two elements a byte at a time
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size
+ *
+ * This is the fallback if alignment doesn't allow using larger chunks.
+ */
+static void swap_bytes(void *a, void *b, size_t n)
+{
+ do {
+ char t = ((char *)a)[--n];
+ ((char *)a)[n] = ((char *)b)[n];
+ ((char *)b)[n] = t;
+ } while (n);
+}
+
+/*
+ * The values are arbitrary as long as they can't be confused with
+ * a pointer, but small integers make for the smallest compare
+ * instructions.
+ */
+#define SWAP_WORDS_64 (swap_r_func_t)0
+#define SWAP_WORDS_32 (swap_r_func_t)1
+#define SWAP_BYTES (swap_r_func_t)2
+#define SWAP_WRAPPER (swap_r_func_t)3
+
+struct wrapper {
+ cmp_func_t cmp;
+ swap_func_t swap_func;
+};
+
+/*
+ * The function pointer is last to make tail calls most efficient if the
+ * compiler decides not to inline this function.
+ */
+static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
+{
+ if (swap_func == SWAP_WRAPPER) {
+ ((const struct wrapper *)priv)->swap_func(a, b, (int)size);
+ return;
+ }
+
+ if (swap_func == SWAP_WORDS_64)
+ swap_words_64(a, b, size);
+ else if (swap_func == SWAP_WORDS_32)
+ swap_words_32(a, b, size);
+ else if (swap_func == SWAP_BYTES)
+ swap_bytes(a, b, size);
+ else
+ swap_func(a, b, (int)size, priv);
+}
+
+#define _CMP_WRAPPER ((cmp_r_func_t)0L)
+
+static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
+{
+ if (cmp == _CMP_WRAPPER)
+ return ((const struct wrapper *)priv)->cmp(a, b);
+ return cmp(a, b, priv);
+}
+
+static inline int eytzinger0_do_cmp(void *base, size_t n, size_t size,
+ cmp_r_func_t cmp_func, const void *priv,
+ size_t l, size_t r)
+{
+ return do_cmp(base + inorder_to_eytzinger0(l, n) * size,
+ base + inorder_to_eytzinger0(r, n) * size,
+ cmp_func, priv);
+}
+
+static inline void eytzinger0_do_swap(void *base, size_t n, size_t size,
+ swap_r_func_t swap_func, const void *priv,
+ size_t l, size_t r)
+{
+ do_swap(base + inorder_to_eytzinger0(l, n) * size,
+ base + inorder_to_eytzinger0(r, n) * size,
+ size, swap_func, priv);
+}
+
+void eytzinger0_sort_r(void *base, size_t n, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ int i, c, r;
+
+ /* called from 'sort' without swap function, let's pick the default */
+ if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
+ swap_func = NULL;
+
+ if (!swap_func) {
+ if (is_aligned(base, size, 8))
+ swap_func = SWAP_WORDS_64;
+ else if (is_aligned(base, size, 4))
+ swap_func = SWAP_WORDS_32;
+ else
+ swap_func = SWAP_BYTES;
+ }
+
+ /* heapify */
+ for (i = n / 2 - 1; i >= 0; --i) {
+ for (r = i; r * 2 + 1 < n; r = c) {
+ c = r * 2 + 1;
+
+ if (c + 1 < n &&
+ eytzinger0_do_cmp(base, n, size, cmp_func, priv, c, c + 1) < 0)
+ c++;
+
+ if (eytzinger0_do_cmp(base, n, size, cmp_func, priv, r, c) >= 0)
+ break;
+
+ eytzinger0_do_swap(base, n, size, swap_func, priv, r, c);
+ }
+ }
+
+ /* sort */
+ for (i = n - 1; i > 0; --i) {
+ eytzinger0_do_swap(base, n, size, swap_func, priv, 0, i);
+
+ for (r = 0; r * 2 + 1 < i; r = c) {
+ c = r * 2 + 1;
+
+ if (c + 1 < i &&
+ eytzinger0_do_cmp(base, n, size, cmp_func, priv, c, c + 1) < 0)
+ c++;
+
+ if (eytzinger0_do_cmp(base, n, size, cmp_func, priv, r, c) >= 0)
+ break;
+
+ eytzinger0_do_swap(base, n, size, swap_func, priv, r, c);
+ }
+ }
+}
+
+void eytzinger0_sort(void *base, size_t n, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func)
+{
+ struct wrapper w = {
+ .cmp = cmp_func,
+ .swap_func = swap_func,
+ };
+
+ return eytzinger0_sort_r(base, n, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
+}
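
A minimal usage sketch (hypothetical comparator; cmp_int() is used the same way elsewhere in this series):

static int cmp_u64(const void *l, const void *r)
{
	return cmp_int(*(const u64 *)l, *(const u64 *)r);
}

/* sorts @nr u64s so that eytzinger0 traversal visits them in order: */
eytzinger0_sort(keys, nr, sizeof(keys[0]), cmp_u64, NULL);
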
diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h
index b04750dbf870..24840aee335c 100644
--- a/fs/bcachefs/eytzinger.h
+++ b/fs/bcachefs/eytzinger.h
@@ -5,23 +5,33 @@
#include <linux/bitops.h>
#include <linux/log2.h>
-#include "util.h"
+#ifdef EYTZINGER_DEBUG
+#define EYTZINGER_BUG_ON(cond) BUG_ON(cond)
+#else
+#define EYTZINGER_BUG_ON(cond)
+#endif
/*
* Traversal for trees in eytzinger layout - a full binary tree laid out in an
- * array
- */
-
-/*
- * One based indexing version:
+ * array.
+ *
+ * Consider using an eytzinger tree any time you would otherwise be doing binary
+ * search over an array. Binary search is a worst case scenario for branch
+ * prediction and prefetching, but in an eytzinger tree every node's children
+ * are adjacent in memory, thus we can prefetch children before knowing the
+ * result of the comparison, assuming multiple nodes fit on a cacheline.
*
- * With one based indexing each level of the tree starts at a power of two -
- * good for cacheline alignment:
+ * Two variants are provided, for one based indexing and zero based indexing.
+ *
+ * Zero based indexing is more convenient, but one based indexing has better
+ * alignment and thus better performance because each new level of the tree
+ * starts at a power of two, and thus if element 0 was cacheline aligned, each
+ * new level will be as well.
*/
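/* Toy illustration, not part of this patch: the sorted keys 1..7 stored
 * one-based, plus the branch-free lower-bound search the comment above
 * alludes to:
 */
static const int toy[1 + 7] = { 0 /* unused */, 4, 2, 6, 1, 3, 5, 7 };

static unsigned ey1_lower_bound(const int *t, unsigned n, int x)
{
	unsigned i = 1;

	while (i <= n)
		i = (i << 1) + (t[i] < x);	/* children 2i, 2i+1 are adjacent */
	return i >> __builtin_ffs(~i);		/* 0 if every element < x */
}

/* ey1_lower_bound(toy, 7, 5) == 6, and toy[6] == 5 */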
static inline unsigned eytzinger1_child(unsigned i, unsigned child)
{
- EBUG_ON(child > 1);
+ EYTZINGER_BUG_ON(child > 1);
return (i << 1) + child;
}
@@ -58,7 +68,7 @@ static inline unsigned eytzinger1_last(unsigned size)
static inline unsigned eytzinger1_next(unsigned i, unsigned size)
{
- EBUG_ON(i > size);
+ EYTZINGER_BUG_ON(i > size);
if (eytzinger1_right_child(i) <= size) {
i = eytzinger1_right_child(i);
@@ -74,7 +84,7 @@ static inline unsigned eytzinger1_next(unsigned i, unsigned size)
static inline unsigned eytzinger1_prev(unsigned i, unsigned size)
{
- EBUG_ON(i > size);
+ EYTZINGER_BUG_ON(i > size);
if (eytzinger1_left_child(i) <= size) {
i = eytzinger1_left_child(i) + 1;
@@ -101,7 +111,7 @@ static inline unsigned __eytzinger1_to_inorder(unsigned i, unsigned size,
unsigned shift = __fls(size) - b;
int s;
- EBUG_ON(!i || i > size);
+ EYTZINGER_BUG_ON(!i || i > size);
i ^= 1U << b;
i <<= 1;
@@ -126,7 +136,7 @@ static inline unsigned __inorder_to_eytzinger1(unsigned i, unsigned size,
unsigned shift;
int s;
- EBUG_ON(!i || i > size);
+ EYTZINGER_BUG_ON(!i || i > size);
/*
* sign bit trick:
@@ -164,7 +174,7 @@ static inline unsigned inorder_to_eytzinger1(unsigned i, unsigned size)
static inline unsigned eytzinger0_child(unsigned i, unsigned child)
{
- EBUG_ON(child > 1);
+ EYTZINGER_BUG_ON(child > 1);
return (i << 1) + 1 + child;
}
@@ -231,11 +241,9 @@ static inline unsigned inorder_to_eytzinger0(unsigned i, unsigned size)
(_i) != -1; \
(_i) = eytzinger0_next((_i), (_size)))
-typedef int (*eytzinger_cmp_fn)(const void *l, const void *r, size_t size);
-
/* return greatest node <= @search, or -1 if not found */
-static inline ssize_t eytzinger0_find_le(void *base, size_t nr, size_t size,
- eytzinger_cmp_fn cmp, const void *search)
+static inline int eytzinger0_find_le(void *base, size_t nr, size_t size,
+ cmp_func_t cmp, const void *search)
{
unsigned i, n = 0;
@@ -244,21 +252,38 @@ static inline ssize_t eytzinger0_find_le(void *base, size_t nr, size_t size,
do {
i = n;
- n = eytzinger0_child(i, cmp(search, base + i * size, size) >= 0);
+ n = eytzinger0_child(i, cmp(base + i * size, search) <= 0);
} while (n < nr);
if (n & 1) {
- /* @i was greater than @search, return previous node: */
-
- if (i == eytzinger0_first(nr))
- return -1;
-
+ /*
+ * @i was greater than @search, return previous node:
+ *
+ * if @i was leftmost/smallest element,
+ * eytzinger0_prev(eytzinger0_first()) returns -1, as expected
+ */
return eytzinger0_prev(i, nr);
} else {
return i;
}
}
+static inline int eytzinger0_find_gt(void *base, size_t nr, size_t size,
+ cmp_func_t cmp, const void *search)
+{
+ ssize_t idx = eytzinger0_find_le(base, nr, size, cmp, search);
+
+ /*
+ * if eytzinger0_find_le() returned -1 - no element was <= search - we
+ * want to return the first element; next/prev identities mean this works
+ * as expected
+ *
+ * similarly if find_le() returns last element, we should return -1;
+ * identities mean this all works out:
+ */
+ return eytzinger0_next(idx, nr);
+}
+
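/* Worked example, not in the patch: on the sorted set {10, 20, 30},
 * eytzinger0_find_le(..., search=25) -> index of 20
 * eytzinger0_find_le(..., search=5)  -> -1 (nothing <= 5)
 * eytzinger0_find_gt(..., search=25) -> index of 30
 * eytzinger0_find_gt(..., search=30) -> -1 (next of the last element)
 */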
#define eytzinger0_find(base, nr, size, _cmp, search) \
({ \
void *_base = (base); \
@@ -269,13 +294,13 @@ static inline ssize_t eytzinger0_find_le(void *base, size_t nr, size_t size,
int _res; \
\
while (_i < _nr && \
- (_res = _cmp(_search, _base + _i * _size, _size))) \
+ (_res = _cmp(_search, _base + _i * _size))) \
_i = eytzinger0_child(_i, _res > 0); \
_i; \
})
-void eytzinger0_sort(void *, size_t, size_t,
- int (*cmp_func)(const void *, const void *, size_t),
- void (*swap_func)(void *, void *, size_t));
+void eytzinger0_sort_r(void *, size_t, size_t,
+ cmp_r_func_t, swap_r_func_t, const void *);
+void eytzinger0_sort(void *, size_t, size_t, cmp_func_t, swap_func_t);
#endif /* _EYTZINGER_H */
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 33cb6da3a5ad..b889370a5088 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -387,6 +387,8 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio)
ret = dio->op.error ?: ((long) dio->written << 9);
bio_put(&dio->op.wbio.bio);
+ bch2_write_ref_put(dio->op.c, BCH_WRITE_REF_dio_write);
+
/* inode->i_dio_count is our ref on inode and thus bch_fs */
inode_dio_end(&inode->v);
@@ -536,7 +538,7 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
if (likely(!dio->iter.count) || dio->op.error)
break;
- bio_reset(bio, NULL, REQ_OP_WRITE);
+ bio_reset(bio, NULL, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
}
out:
return bch2_dio_write_done(dio);
@@ -590,22 +592,25 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
prefetch(&inode->ei_inode);
prefetch((void *) &inode->ei_inode + 64);
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_dio_write))
+ return -EROFS;
+
inode_lock(&inode->v);
ret = generic_write_checks(req, iter);
if (unlikely(ret <= 0))
- goto err;
+ goto err_put_write_ref;
ret = file_remove_privs(file);
if (unlikely(ret))
- goto err;
+ goto err_put_write_ref;
ret = file_update_time(file);
if (unlikely(ret))
- goto err;
+ goto err_put_write_ref;
if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
- goto err;
+ goto err_put_write_ref;
inode_dio_begin(&inode->v);
bch2_pagecache_block_get(inode);
@@ -618,7 +623,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
- REQ_OP_WRITE,
+ REQ_OP_WRITE | REQ_SYNC | REQ_IDLE,
GFP_KERNEL,
&c->dio_write_bioset);
dio = container_of(bio, struct dio_write, op.wbio.bio);
@@ -645,7 +650,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
}
ret = bch2_dio_write_loop(dio);
-err:
+out:
if (locked)
inode_unlock(&inode->v);
return ret;
@@ -653,7 +658,9 @@ err_put_bio:
bch2_pagecache_block_put(inode);
bio_put(bio);
inode_dio_end(&inode->v);
- goto err;
+err_put_write_ref:
+ bch2_write_ref_put(c, BCH_WRITE_REF_dio_write);
+ goto out;
}
void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 8c70123b6a0c..20b40477425f 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -174,18 +174,18 @@ void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
static int bch2_flush_inode(struct bch_fs *c,
struct bch_inode_info *inode)
{
- struct bch_inode_unpacked u;
- int ret;
-
if (c->opts.journal_flush_disabled)
return 0;
- ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
- if (ret)
- return ret;
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync))
+ return -EROFS;
- return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
- bch2_inode_flush_nocow_writes(c, inode);
+ struct bch_inode_unpacked u;
+ int ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u) ?:
+ bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+ bch2_inode_flush_nocow_writes(c, inode);
+ bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
+ return ret;
}
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
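
The reworked bch2_flush_inode() also shows the `?:` chain bcachefs uses for error plumbing: with the GNU a ?: b extension, the first nonzero (error) result short-circuits the rest, so the inode lookup, journal flush, and nocow flush run in order until one fails. A standalone illustration (GCC/Clang extension; toy function names):

/* Each step returns 0 on success or a negative errno. */
static int step1(void) { return 0; }
static int step2(void) { return -5; }
static int step3(void) { return 0; }

static int chained(void)
{
	/* step3() is never called: step2()'s -5 short-circuits the chain. */
	return step1() ?: step2() ?: step3();
}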
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 0ccee05f6887..fce690007edf 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -188,7 +188,8 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
BUG_ON(!old);
if (unlikely(old != inode)) {
- discard_new_inode(&inode->v);
+ __destroy_inode(&inode->v);
+ kmem_cache_free(bch2_inode_cache, inode);
inode = old;
} else {
mutex_lock(&c->vfs_inodes_lock);
@@ -225,8 +226,10 @@ static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans)
if (unlikely(!inode)) {
int ret = drop_locks_do(trans, (inode = to_bch_ei(new_inode(c->vfs_sb))) ? 0 : -ENOMEM);
- if (ret && inode)
- discard_new_inode(&inode->v);
+ if (ret && inode) {
+ __destroy_inode(&inode->v);
+ kmem_cache_free(bch2_inode_cache, inode);
+ }
if (ret)
return ERR_PTR(ret);
}
@@ -1997,6 +2000,7 @@ out:
return dget(sb->s_root);
err_put_super:
+ __bch2_fs_stop(c);
deactivate_locked_super(sb);
return ERR_PTR(bch2_err_class(ret));
}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 47d4eefaba7b..8e2010212cc3 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -12,7 +12,7 @@
#include "fsck.h"
#include "inode.h"
#include "keylist.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "snapshot.h"
#include "super.h"
#include "xattr.h"
@@ -63,9 +63,7 @@ static int subvol_lookup(struct btree_trans *trans, u32 subvol,
u32 *snapshot, u64 *inum)
{
struct bch_subvolume s;
- int ret;
-
- ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
+ int ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
*snapshot = le32_to_cpu(s.snapshot);
*inum = le64_to_cpu(s.inode);
@@ -158,9 +156,10 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
- ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
- &dir_hash_info, &iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
+ &dir_hash_info, &iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
bch2_trans_iter_exit(trans, &iter);
err:
bch_err_fn(c, ret);
@@ -169,7 +168,8 @@ err:
/* Get lost+found, create if it doesn't exist: */
static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
- struct bch_inode_unpacked *lostfound)
+ struct bch_inode_unpacked *lostfound,
+ u64 reattaching_inum)
{
struct bch_fs *c = trans->c;
struct qstr lostfound_str = QSTR("lost+found");
@@ -184,19 +184,36 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
return ret;
subvol_inum root_inum = { .subvol = le32_to_cpu(st.master_subvol) };
- u32 subvol_snapshot;
- ret = subvol_lookup(trans, le32_to_cpu(st.master_subvol),
- &subvol_snapshot, &root_inum.inum);
- bch_err_msg(c, ret, "looking up root subvol");
+ struct bch_subvolume subvol;
+ ret = bch2_subvolume_get(trans, le32_to_cpu(st.master_subvol),
+ false, 0, &subvol);
+ bch_err_msg(c, ret, "looking up root subvol %u for snapshot %u",
+ le32_to_cpu(st.master_subvol), snapshot);
if (ret)
return ret;
+ if (!subvol.inode) {
+ struct btree_iter iter;
+ struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_subvolumes, POS(0, le32_to_cpu(st.master_subvol)),
+ 0, subvolume);
+ ret = PTR_ERR_OR_ZERO(subvol);
+ if (ret)
+ return ret;
+
+ subvol->v.inode = cpu_to_le64(reattaching_inum);
+ bch2_trans_iter_exit(trans, &iter);
+ }
+
+ root_inum.inum = le64_to_cpu(subvol.inode);
+
struct bch_inode_unpacked root_inode;
struct bch_hash_info root_hash_info;
u32 root_inode_snapshot = snapshot;
ret = lookup_inode(trans, root_inum.inum, &root_inode, &root_inode_snapshot);
- bch_err_msg(c, ret, "looking up root inode");
+ bch_err_msg(c, ret, "looking up root inode %llu for subvol %u",
+ root_inum.inum, le32_to_cpu(st.master_subvol));
if (ret)
return ret;
@@ -292,7 +309,7 @@ static int reattach_inode(struct btree_trans *trans,
snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
}
- ret = lookup_lostfound(trans, dirent_snapshot, &lostfound);
+ ret = lookup_lostfound(trans, dirent_snapshot, &lostfound, inode->bi_inum);
if (ret)
return ret;
@@ -363,6 +380,112 @@ static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume
return ret;
}
+static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 subvolid, u64 inum)
+{
+ struct bch_fs *c = trans->c;
+
+ if (!bch2_snapshot_is_leaf(c, snapshotid)) {
+ bch_err(c, "need to reconstruct subvol, but have interior node snapshot");
+ return -BCH_ERR_fsck_repair_unimplemented;
+ }
+
+ /*
+ * If inum isn't set, that means we're being called from check_dirents,
+ * not check_inodes - the root of this subvolume doesn't exist or we
+ * would have found it there:
+ */
+ if (!inum) {
+ struct btree_iter inode_iter = {};
+ struct bch_inode_unpacked new_inode;
+ u64 cpu = raw_smp_processor_id();
+
+ bch2_inode_init_early(c, &new_inode);
+ bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL);
+
+ new_inode.bi_subvol = subvolid;
+
+ int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
+ bch2_btree_iter_traverse(&inode_iter) ?:
+ bch2_inode_write(trans, &inode_iter, &new_inode);
+ bch2_trans_iter_exit(trans, &inode_iter);
+ if (ret)
+ return ret;
+
+ inum = new_inode.bi_inum;
+ }
+
+ bch_info(c, "reconstructing subvol %u with root inode %llu", subvolid, inum);
+
+ struct bkey_i_subvolume *new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
+ int ret = PTR_ERR_OR_ZERO(new_subvol);
+ if (ret)
+ return ret;
+
+ bkey_subvolume_init(&new_subvol->k_i);
+ new_subvol->k.p.offset = subvolid;
+ new_subvol->v.snapshot = cpu_to_le32(snapshotid);
+ new_subvol->v.inode = cpu_to_le64(inum);
+ ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &new_subvol->k_i, 0);
+ if (ret)
+ return ret;
+
+ struct btree_iter iter;
+ struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_snapshots, POS(0, snapshotid),
+ 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(s);
+ bch_err_msg(c, ret, "getting snapshot %u", snapshotid);
+ if (ret)
+ return ret;
+
+ u32 snapshot_tree = le32_to_cpu(s->v.tree);
+
+ s->v.subvol = cpu_to_le32(subvolid);
+ SET_BCH_SNAPSHOT_SUBVOL(&s->v, true);
+ bch2_trans_iter_exit(trans, &iter);
+
+ struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_snapshot_trees, POS(0, snapshot_tree),
+ 0, snapshot_tree);
+ ret = PTR_ERR_OR_ZERO(st);
+ bch_err_msg(c, ret, "getting snapshot tree %u", snapshot_tree);
+ if (ret)
+ return ret;
+
+ if (!st->v.master_subvol)
+ st->v.master_subvol = cpu_to_le32(subvolid);
+
+ bch2_trans_iter_exit(trans, &iter);
+ return 0;
+}
+
+static int reconstruct_inode(struct btree_trans *trans, u32 snapshot, u64 inum, u64 size, unsigned mode)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_inode_unpacked new_inode;
+
+ bch2_inode_init_early(c, &new_inode);
+ bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, mode|0755, 0, NULL);
+ new_inode.bi_size = size;
+ new_inode.bi_inum = inum;
+
+ return __bch2_fsck_write_inode(trans, &new_inode, snapshot);
+}
+
+static int reconstruct_reg_inode(struct btree_trans *trans, u32 snapshot, u64 inum)
+{
+ struct btree_iter iter = {};
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
+ struct bkey_s_c k = bch2_btree_iter_peek_prev(&iter);
+ bch2_trans_iter_exit(trans, &iter);
+ int ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ return reconstruct_inode(trans, snapshot, inum, k.k->p.offset << 9, S_IFREG);
+}
+
struct snapshots_seen_entry {
u32 id;
u32 equiv;
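
reconstruct_reg_inode() above recovers a plausible size by peeking at the inode's last extent: bcachefs extent keys are addressed by end position in 512-byte sectors, so the shift by 9 converts that end offset into a byte count for bi_size. In isolation, assuming that sector-addressed key convention:

#include <stdint.h>

/* Sketch: the last extent ends at end_sector (512-byte units), so the
 * reconstructed bi_size is that offset expressed in bytes. */
static uint64_t isize_from_last_extent(uint64_t end_sector)
{
	return end_sector << 9;
}
/* e.g. an extent ending at sector 8 yields bi_size = 4096 */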
@@ -1064,6 +1187,11 @@ static int check_inode(struct btree_trans *trans,
if (ret && !bch2_err_matches(ret, ENOENT))
goto err;
+ if (ret && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
+ ret = reconstruct_subvol(trans, k.k->p.snapshot, u.bi_subvol, u.bi_inum);
+ goto do_update;
+ }
+
if (fsck_err_on(ret,
c, inode_bi_subvol_missing,
"inode %llu:%u bi_subvol points to missing subvolume %u",
@@ -1081,7 +1209,7 @@ static int check_inode(struct btree_trans *trans,
do_update = true;
}
}
-
+do_update:
if (do_update) {
ret = __bch2_fsck_write_inode(trans, &u, iter->pos.snapshot);
bch_err_msg(c, ret, "in fsck updating inode");
@@ -1130,8 +1258,8 @@ static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_wal
i->count = count2;
if (i->count != count2) {
- bch_err(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
- w->last_pos.inode, i->snapshot, i->count, count2);
+ bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
+ w->last_pos.inode, i->snapshot, i->count, count2);
return -BCH_ERR_internal_fsck_err;
}
@@ -1371,10 +1499,6 @@ static int check_overlapping_extents(struct btree_trans *trans,
goto err;
}
- ret = extent_ends_at(c, extent_ends, seen, k);
- if (ret)
- goto err;
-
extent_ends->last_pos = k.k->p;
err:
return ret;
@@ -1438,6 +1562,17 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
if (k.k->type != KEY_TYPE_whiteout) {
+ if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) {
+ ret = reconstruct_reg_inode(trans, k.k->p.snapshot, k.k->p.inode) ?:
+ bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
+ if (ret)
+ goto err;
+
+ inode->last_pos.inode--;
+ ret = -BCH_ERR_transaction_restart_nested;
+ goto err;
+ }
+
if (fsck_err_on(!i, c, extent_in_missing_inode,
"extent in missing inode:\n %s",
(printbuf_reset(&buf),
@@ -1504,6 +1639,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
i->seen_this_pos = true;
}
+
+ if (k.k->type != KEY_TYPE_whiteout) {
+ ret = extent_ends_at(c, extent_ends, s, k);
+ if (ret)
+ goto err;
+ }
out:
err:
fsck_err:
@@ -1584,8 +1725,8 @@ static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_
return count2;
if (i->count != count2) {
- bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
- i->count, count2);
+ bch_err_ratelimited(c, "fsck counted subdirectories wrong for inum %llu:%u: got %llu should be %llu",
+ w->last_pos.inode, i->snapshot, i->count, count2);
i->count = count2;
if (i->inode.bi_nlink == i->count)
continue;
@@ -1782,6 +1923,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol);
u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
u32 parent_snapshot;
+ u32 new_parent_subvol = 0;
u64 parent_inum;
struct printbuf buf = PRINTBUF;
int ret = 0;
@@ -1790,6 +1932,27 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
+ if (ret ||
+ (!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot))) {
+ int ret2 = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol);
+ if (ret2 && !bch2_err_matches(ret, ENOENT))
+ return ret2;
+ }
+
+ if (ret &&
+ !new_parent_subvol &&
+ (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
+ /*
+ * Couldn't find a subvol for dirent's snapshot - but we lost
+ * subvols, so we need to reconstruct:
+ */
+ ret = reconstruct_subvol(trans, d.k->p.snapshot, parent_subvol, 0);
+ if (ret)
+ return ret;
+
+ parent_snapshot = d.k->p.snapshot;
+ }
+
if (fsck_err_on(ret, c, dirent_to_missing_parent_subvol,
"dirent parent_subvol points to missing subvolume\n%s",
(bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)) ||
@@ -1798,10 +1961,10 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
"dirent not visible in parent_subvol (not an ancestor of subvol snap %u)\n%s",
parent_snapshot,
(bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
- u32 new_parent_subvol;
- ret = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol);
- if (ret)
- goto err;
+ if (!new_parent_subvol) {
+ bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot);
+ return -BCH_ERR_fsck_repair_unimplemented;
+ }
struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent);
ret = PTR_ERR_OR_ZERO(new_dirent);
@@ -1847,9 +2010,16 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
ret = lookup_inode(trans, target_inum, &subvol_root, &target_snapshot);
if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
+ goto err;
+
+ if (ret) {
+ bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum);
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ ret = 0;
+ goto err;
+ }
- if (fsck_err_on(parent_subvol != subvol_root.bi_parent_subvol,
+ if (fsck_err_on(!ret && parent_subvol != subvol_root.bi_parent_subvol,
c, inode_bi_parent_wrong,
"subvol root %llu has wrong bi_parent_subvol: got %u, should be %u",
target_inum,
@@ -1857,13 +2027,13 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
subvol_root.bi_parent_subvol = parent_subvol;
ret = __bch2_fsck_write_inode(trans, &subvol_root, target_snapshot);
if (ret)
- return ret;
+ goto err;
}
ret = check_dirent_target(trans, iter, d, &subvol_root,
target_snapshot);
if (ret)
- return ret;
+ goto err;
out:
err:
fsck_err:
@@ -1880,7 +2050,6 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
struct snapshots_seen *s)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c_dirent d;
struct inode_walker_entry *i;
struct printbuf buf = PRINTBUF;
struct bpos equiv;
@@ -1919,6 +2088,17 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
*hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
dir->first_this_inode = false;
+ if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) {
+ ret = reconstruct_inode(trans, k.k->p.snapshot, k.k->p.inode, 0, S_IFDIR) ?:
+ bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
+ if (ret)
+ goto err;
+
+ dir->last_pos.inode--;
+ ret = -BCH_ERR_transaction_restart_nested;
+ goto err;
+ }
+
if (fsck_err_on(!i, c, dirent_in_missing_dir_inode,
"dirent in nonexisting directory:\n%s",
(printbuf_reset(&buf),
@@ -1953,7 +2133,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (k.k->type != KEY_TYPE_dirent)
goto out;
- d = bkey_s_c_to_dirent(k);
+ struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
if (d.v->d_type == DT_SUBVOL) {
ret = check_dirent_to_subvol(trans, iter, d);
@@ -2098,17 +2278,21 @@ static int check_root_trans(struct btree_trans *trans)
if (mustfix_fsck_err_on(ret, c, root_subvol_missing,
"root subvol missing")) {
- struct bkey_i_subvolume root_subvol;
+ struct bkey_i_subvolume *root_subvol =
+ bch2_trans_kmalloc(trans, sizeof(*root_subvol));
+ ret = PTR_ERR_OR_ZERO(root_subvol);
+ if (ret)
+ goto err;
snapshot = U32_MAX;
inum = BCACHEFS_ROOT_INO;
- bkey_subvolume_init(&root_subvol.k_i);
- root_subvol.k.p.offset = BCACHEFS_ROOT_SUBVOL;
- root_subvol.v.flags = 0;
- root_subvol.v.snapshot = cpu_to_le32(snapshot);
- root_subvol.v.inode = cpu_to_le64(inum);
- ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol.k_i, 0);
+ bkey_subvolume_init(&root_subvol->k_i);
+ root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL;
+ root_subvol->v.flags = 0;
+ root_subvol->v.snapshot = cpu_to_le32(snapshot);
+ root_subvol->v.inode = cpu_to_le64(inum);
+ ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0);
bch_err_msg(c, ret, "writing root subvol");
if (ret)
goto err;
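
A pattern worth noting in the check_extent() and check_dirent() hunks above: when a key references an inode that is missing because the inodes btree was lost, fsck reconstructs the inode, commits immediately, steps the inode walker's last_pos back by one, and returns -BCH_ERR_transaction_restart_nested so the iteration re-runs and the normal checks see the new inode on the second pass. A compilable toy version of that repair-and-retry shape (all names hypothetical):

#include <stdint.h>
#include <stdbool.h>

struct walker { uint64_t last_pos; };

static bool walker_have_inode(struct walker *w, uint64_t inum)
{ (void)w; (void)inum; return false; }		/* stub */
static void reconstruct_inode(uint64_t inum) { (void)inum; }	/* stub */

#define RESTART 1

static int check_key(struct walker *w, uint64_t inum)
{
	if (!walker_have_inode(w, inum)) {
		reconstruct_inode(inum);	/* committed right away */
		w->last_pos = inum - 1;		/* step back over the new inode */
		return RESTART;			/* caller restarts the scan */
	}
	/* ... normal checks ... */
	return 0;
}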
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 2b5e06770ab3..ca4a066e9a54 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -552,8 +552,8 @@ static void __bch2_inode_unpacked_to_text(struct printbuf *out,
prt_printf(out, "bi_sectors=%llu", inode->bi_sectors);
prt_newline(out);
- prt_newline(out);
prt_printf(out, "bi_version=%llu", inode->bi_version);
+ prt_newline(out);
#define x(_name, _bits) \
prt_printf(out, #_name "=%llu", (u64) inode->_name); \
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 1baf78594cca..82f9170dab3f 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -264,6 +264,7 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
ret = 0;
err:
bch2_logged_op_finish(trans, op_k);
+ bch_err_fn(c, ret);
return ret;
}
@@ -476,6 +477,7 @@ case LOGGED_OP_FINSERT_finish:
break;
}
err:
+ bch_err_fn(c, ret);
bch2_logged_op_finish(trans, op_k);
bch2_trans_iter_exit(trans, &iter);
return ret;
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 725fcf46f631..eb1f9d6f5a19 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -247,7 +247,7 @@ static void journal_entry_err_msg(struct printbuf *out,
if (entry) {
prt_str(out, " type=");
- prt_str(out, bch2_jset_entry_types[entry->type]);
+ bch2_prt_jset_entry_type(out, entry->type);
}
if (!jset) {
@@ -403,7 +403,8 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
jset_entry_for_each_key(entry, k) {
if (!first) {
prt_newline(out);
- prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+ bch2_prt_jset_entry_type(out, entry->type);
+ prt_str(out, ": ");
}
prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
@@ -563,9 +564,9 @@ static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry_usage *u =
container_of(entry, struct jset_entry_usage, entry);
- prt_printf(out, "type=%s v=%llu",
- bch2_fs_usage_types[u->entry.btree_id],
- le64_to_cpu(u->v));
+ prt_str(out, "type=");
+ bch2_prt_fs_usage_type(out, u->entry.btree_id);
+ prt_printf(out, " v=%llu", le64_to_cpu(u->v));
}
static int journal_entry_data_usage_validate(struct bch_fs *c,
@@ -827,11 +828,11 @@ int bch2_journal_entry_validate(struct bch_fs *c,
void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
+ bch2_prt_jset_entry_type(out, entry->type);
+
if (entry->type < BCH_JSET_ENTRY_NR) {
- prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+ prt_str(out, ": ");
bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
- } else {
- prt_printf(out, "(unknown type %u)", entry->type);
}
}
@@ -1722,7 +1723,7 @@ static void journal_write_endio(struct bio *bio)
percpu_ref_put(&ca->io_ref);
}
-static CLOSURE_CALLBACK(do_journal_write)
+static CLOSURE_CALLBACK(journal_write_submit)
{
closure_type(w, struct journal_buf, io);
struct journal *j = container_of(w, struct journal, buf[w->idx]);
@@ -1767,6 +1768,44 @@ static CLOSURE_CALLBACK(do_journal_write)
continue_at(cl, journal_write_done, j->wq);
}
+static CLOSURE_CALLBACK(journal_write_preflush)
+{
+ closure_type(w, struct journal_buf, io);
+ struct journal *j = container_of(w, struct journal, buf[w->idx]);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+
+ if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
+ spin_lock(&j->lock);
+ closure_wait(&j->async_wait, cl);
+ spin_unlock(&j->lock);
+
+ continue_at(cl, journal_write_preflush, j->wq);
+ return;
+ }
+
+ if (w->separate_flush) {
+ for_each_rw_member(c, ca) {
+ percpu_ref_get(&ca->io_ref);
+
+ struct journal_device *ja = &ca->journal;
+ struct bio *bio = &ja->bio[w->idx]->bio;
+ bio_reset(bio, ca->disk_sb.bdev,
+ REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
+ bio->bi_end_io = journal_write_endio;
+ bio->bi_private = ca;
+ closure_bio_submit(bio, cl);
+ }
+
+ continue_at(cl, journal_write_submit, j->wq);
+ } else {
+ /*
+ * no need to punt to another work item if we're not waiting on
+ * preflushes
+ */
+ journal_write_submit(&cl->work);
+ }
+}
+
static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
@@ -2032,23 +2071,9 @@ CLOSURE_CALLBACK(bch2_journal_write)
goto err;
if (!JSET_NO_FLUSH(w->data))
- closure_wait_event(&j->async_wait, j->seq_ondisk + 1 == le64_to_cpu(w->data->seq));
-
- if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
- for_each_rw_member(c, ca) {
- percpu_ref_get(&ca->io_ref);
-
- struct journal_device *ja = &ca->journal;
- struct bio *bio = &ja->bio[w->idx]->bio;
- bio_reset(bio, ca->disk_sb.bdev,
- REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
- bio->bi_end_io = journal_write_endio;
- bio->bi_private = ca;
- closure_bio_submit(bio, cl);
- }
- }
-
- continue_at(cl, do_journal_write, j->wq);
+ continue_at(cl, journal_write_preflush, j->wq);
+ else
+ continue_at(cl, journal_write_submit, j->wq);
return;
no_io:
continue_at(cl, journal_write_done, j->wq);
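
The journal_io.c refactor replaces an open-coded closure_wait_event() plus inline flush submission with a chain of closure callbacks: bch2_journal_write() tails into journal_write_preflush(), which requeues itself until the previous sequence number is on disk, issues the PREFLUSH bios if a separate flush is needed, then tails into journal_write_submit(). A toy model of that self-requeueing continuation style (not the real closure API):

#include <stdbool.h>

typedef void (*step_fn)(void *);

struct work { step_fn next; };

static bool prior_seq_on_disk(void) { return true; }	/* stub */

/* Sketch of continue_at(): record the next step to run. */
static void continue_at(struct work *w, step_fn fn) { w->next = fn; }

static void write_submit(void *arg) { (void)arg; /* issue the write */ }

static void write_preflush(void *arg)
{
	struct work *w = arg;

	if (!prior_seq_on_disk()) {
		continue_at(w, write_preflush);	/* wait, run again later */
		return;
	}
	/* ... issue flush bios if needed ... */
	continue_at(w, write_submit);
}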
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index ab811c0dad26..04a577848b01 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -67,6 +67,8 @@ void bch2_journal_set_watermark(struct journal *j)
track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
trace_and_count(c, journal_full, c);
+ mod_bit(JOURNAL_SPACE_LOW, &j->flags, low_on_space || low_on_pin);
+
swap(watermark, j->watermark);
if (watermark > j->watermark)
journal_wake(j);
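
mod_bit() above presumably sets or clears JOURNAL_SPACE_LOW to mirror the computed condition, collapsing what would otherwise be a set_bit()/clear_bit() if/else. A userspace equivalent (the semantics are an assumption; the in-tree helper may differ):

#include <stdbool.h>

static inline void mod_bit_ul(unsigned bit, unsigned long *flags, bool v)
{
	if (v)
		*flags |= 1UL << bit;
	else
		*flags &= ~(1UL << bit);
}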
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
index b5303874fc35..37a024e034d4 100644
--- a/fs/bcachefs/journal_seq_blacklist.c
+++ b/fs/bcachefs/journal_seq_blacklist.c
@@ -95,8 +95,7 @@ out:
return ret ?: bch2_blacklist_table_initialize(c);
}
-static int journal_seq_blacklist_table_cmp(const void *_l,
- const void *_r, size_t size)
+static int journal_seq_blacklist_table_cmp(const void *_l, const void *_r)
{
const struct journal_seq_blacklist_table_entry *l = _l;
const struct journal_seq_blacklist_table_entry *r = _r;
diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h
index 8c053cb64ca5..b5161b5d76a0 100644
--- a/fs/bcachefs/journal_types.h
+++ b/fs/bcachefs/journal_types.h
@@ -134,6 +134,7 @@ enum journal_flags {
JOURNAL_STARTED,
JOURNAL_MAY_SKIP_FLUSH,
JOURNAL_NEED_FLUSH_WRITE,
+ JOURNAL_SPACE_LOW,
};
/* Reasons we may fail to get a journal reservation: */
diff --git a/fs/bcachefs/logged_ops.c b/fs/bcachefs/logged_ops.c
index 9fac838d123e..b82f8209041f 100644
--- a/fs/bcachefs/logged_ops.c
+++ b/fs/bcachefs/logged_ops.c
@@ -37,7 +37,6 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
const struct bch_logged_op_fn *fn = logged_op_fn(k.k->type);
struct bkey_buf sk;
u32 restart_count = trans->restart_count;
- int ret;
if (!fn)
return 0;
@@ -45,11 +44,11 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
bch2_bkey_buf_init(&sk);
bch2_bkey_buf_reassemble(&sk, c, k);
- ret = drop_locks_do(trans, (bch2_fs_lazy_rw(c), 0)) ?:
- fn->resume(trans, sk.k) ?: trans_was_restarted(trans, restart_count);
+ fn->resume(trans, sk.k);
bch2_bkey_buf_exit(&sk, c);
- return ret;
+
+ return trans_was_restarted(trans, restart_count);
}
int bch2_resume_logged_ops(struct bch_fs *c)
diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c
index db63b3f3b338..4c298e74723d 100644
--- a/fs/bcachefs/mean_and_variance_test.c
+++ b/fs/bcachefs/mean_and_variance_test.c
@@ -136,20 +136,8 @@ static void mean_and_variance_test_1(struct kunit *test)
d, mean, stddev, weighted_mean, weighted_stddev);
}
-static void mean_and_variance_test_2(struct kunit *test)
-{
- s64 d[] = { 100, 10, 10, 10, 10, 10, 10 };
- s64 mean[] = { 10, 10, 10, 10, 10, 10, 10 };
- s64 stddev[] = { 9, 9, 9, 9, 9, 9, 9 };
- s64 weighted_mean[] = { 32, 27, 22, 19, 17, 15, 14 };
- s64 weighted_stddev[] = { 38, 35, 31, 27, 24, 21, 18 };
-
- do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
- d, mean, stddev, weighted_mean, weighted_stddev);
-}
-
/* Test behaviour where we switch from one steady state to another: */
-static void mean_and_variance_test_3(struct kunit *test)
+static void mean_and_variance_test_2(struct kunit *test)
{
s64 d[] = { 100, 100, 100, 100, 100 };
s64 mean[] = { 22, 32, 40, 46, 50 };
@@ -161,18 +149,6 @@ static void mean_and_variance_test_3(struct kunit *test)
d, mean, stddev, weighted_mean, weighted_stddev);
}
-static void mean_and_variance_test_4(struct kunit *test)
-{
- s64 d[] = { 100, 100, 100, 100, 100 };
- s64 mean[] = { 10, 11, 12, 13, 14 };
- s64 stddev[] = { 9, 13, 15, 17, 19 };
- s64 weighted_mean[] = { 32, 49, 61, 71, 78 };
- s64 weighted_stddev[] = { 38, 44, 44, 41, 38 };
-
- do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
- d, mean, stddev, weighted_mean, weighted_stddev);
-}
-
static void mean_and_variance_fast_divpow2(struct kunit *test)
{
s64 i;
@@ -230,8 +206,6 @@ static struct kunit_case mean_and_variance_test_cases[] = {
KUNIT_CASE(mean_and_variance_weighted_advanced_test),
KUNIT_CASE(mean_and_variance_test_1),
KUNIT_CASE(mean_and_variance_test_2),
- KUNIT_CASE(mean_and_variance_test_3),
- KUNIT_CASE(mean_and_variance_test_4),
{}
};
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
index 08ea0cfc4aef..bb068fd72465 100644
--- a/fs/bcachefs/opts.c
+++ b/fs/bcachefs/opts.c
@@ -7,6 +7,7 @@
#include "disk_groups.h"
#include "error.h"
#include "opts.h"
+#include "recovery_passes.h"
#include "super-io.h"
#include "util.h"
@@ -42,7 +43,7 @@ const char * const __bch2_btree_ids[] = {
NULL
};
-const char * const bch2_csum_types[] = {
+static const char * const __bch2_csum_types[] = {
BCH_CSUM_TYPES()
NULL
};
@@ -52,7 +53,7 @@ const char * const bch2_csum_opts[] = {
NULL
};
-const char * const __bch2_compression_types[] = {
+static const char * const __bch2_compression_types[] = {
BCH_COMPRESSION_TYPES()
NULL
};
@@ -82,18 +83,39 @@ const char * const bch2_member_states[] = {
NULL
};
-const char * const bch2_jset_entry_types[] = {
+static const char * const __bch2_jset_entry_types[] = {
BCH_JSET_ENTRY_TYPES()
NULL
};
-const char * const bch2_fs_usage_types[] = {
+static const char * const __bch2_fs_usage_types[] = {
BCH_FS_USAGE_TYPES()
NULL
};
#undef x
+static void prt_str_opt_boundscheck(struct printbuf *out, const char * const opts[],
+ unsigned nr, const char *type, unsigned idx)
+{
+ if (idx < nr)
+ prt_str(out, opts[idx]);
+ else
+ prt_printf(out, "(unknown %s %u)", type, idx);
+}
+
+#define PRT_STR_OPT_BOUNDSCHECKED(name, type) \
+void bch2_prt_##name(struct printbuf *out, type t) \
+{ \
+ prt_str_opt_boundscheck(out, __bch2_##name##s, ARRAY_SIZE(__bch2_##name##s) - 1, #name, t);\
+}
+
+PRT_STR_OPT_BOUNDSCHECKED(jset_entry_type, enum bch_jset_entry_type);
+PRT_STR_OPT_BOUNDSCHECKED(fs_usage_type, enum bch_fs_usage_type);
+PRT_STR_OPT_BOUNDSCHECKED(data_type, enum bch_data_type);
+PRT_STR_OPT_BOUNDSCHECKED(csum_type, enum bch_csum_type);
+PRT_STR_OPT_BOUNDSCHECKED(compression_type, enum bch_compression_type);
+
static int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res,
struct printbuf *err)
{
@@ -205,6 +227,9 @@ const struct bch_option bch2_opt_table[] = {
#define OPT_STR(_choices) .type = BCH_OPT_STR, \
.min = 0, .max = ARRAY_SIZE(_choices), \
.choices = _choices
+#define OPT_STR_NOLIMIT(_choices) .type = BCH_OPT_STR, \
+ .min = 0, .max = U64_MAX, \
+ .choices = _choices
#define OPT_FN(_fn) .type = BCH_OPT_FN, .fn = _fn
#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
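
The opts.c hunks make the enum-name arrays static and route all printing through prt_str_opt_boundscheck(), so an out-of-range value prints "(unknown type N)" instead of indexing past the table; the PRT_STR_OPT_BOUNDSCHECKED() macro then stamps out one typed printer per enum. The same pattern in plain C, with printf standing in for the printbuf API (illustrative names):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* NULL-terminated like the bcachefs tables, so ARRAY_SIZE - 1 is the
 * number of real entries. */
static const char * const fruit_names[] = { "apple", "pear", NULL };

static void prt_bounds_checked(const char * const names[], unsigned nr,
			       const char *type, unsigned idx)
{
	if (idx < nr)
		printf("%s", names[idx]);
	else
		printf("(unknown %s %u)", type, idx);
}

#define PRT_BOUNDSCHECKED(name)						\
static void prt_##name(unsigned t)					\
{									\
	prt_bounds_checked(name##_names,				\
			   ARRAY_SIZE(name##_names) - 1, #name, t);	\
}

PRT_BOUNDSCHECKED(fruit)	/* prt_fruit(7) prints "(unknown fruit 7)" */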
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 136083c11f3a..84e452835a17 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -16,18 +16,20 @@ extern const char * const bch2_version_upgrade_opts[];
extern const char * const bch2_sb_features[];
extern const char * const bch2_sb_compat[];
extern const char * const __bch2_btree_ids[];
-extern const char * const bch2_csum_types[];
extern const char * const bch2_csum_opts[];
-extern const char * const __bch2_compression_types[];
extern const char * const bch2_compression_opts[];
extern const char * const bch2_str_hash_types[];
extern const char * const bch2_str_hash_opts[];
extern const char * const __bch2_data_types[];
extern const char * const bch2_member_states[];
-extern const char * const bch2_jset_entry_types[];
-extern const char * const bch2_fs_usage_types[];
extern const char * const bch2_d_types[];
+void bch2_prt_jset_entry_type(struct printbuf *, enum bch_jset_entry_type);
+void bch2_prt_fs_usage_type(struct printbuf *, enum bch_fs_usage_type);
+void bch2_prt_data_type(struct printbuf *, enum bch_data_type);
+void bch2_prt_csum_type(struct printbuf *, enum bch_csum_type);
+void bch2_prt_compression_type(struct printbuf *, enum bch_compression_type);
+
static inline const char *bch2_d_type_str(unsigned d_type)
{
return (d_type < BCH_DT_MAX ? bch2_d_types[d_type] : NULL) ?: "(bad d_type)";
@@ -362,12 +364,17 @@ enum fsck_err_opts {
OPT_FS|OPT_MOUNT, \
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
- NULL, "Don't replay the journal") \
- x(keep_journal, u8, \
+ NULL, "Exit recovery immediately prior to journal replay")\
+ x(recovery_pass_last, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_STR_NOLIMIT(bch2_recovery_passes), \
+ BCH2_NO_SB_OPT, 0, \
+ NULL, "Exit recovery after specified pass") \
+ x(retain_recovery_info, u8, \
0, \
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
- NULL, "Don't free journal entries/keys after startup")\
+ NULL, "Don't free journal entries/keys, scanned btree nodes after startup")\
x(read_entire_journal, u8, \
0, \
OPT_BOOL(), \
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 03f9d6afe467..be5b47619327 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -1,35 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
-#include "backpointers.h"
-#include "bkey_buf.h"
#include "alloc_background.h"
-#include "btree_gc.h"
+#include "bkey_buf.h"
#include "btree_journal_iter.h"
+#include "btree_node_scan.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
-#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
-#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
-#include "lru.h"
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
+#include "recovery_passes.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-downgrade.h"
#include "snapshot.h"
-#include "subvolume.h"
#include "super-io.h"
#include <linux/sort.h>
@@ -37,22 +33,22 @@
#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
-static bool btree_id_is_alloc(enum btree_id id)
+void bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
{
- switch (id) {
- case BTREE_ID_alloc:
- case BTREE_ID_backpointers:
- case BTREE_ID_need_discard:
- case BTREE_ID_freespace:
- case BTREE_ID_bucket_gens:
- return true;
- default:
- return false;
+ u64 b = BIT_ULL(btree);
+
+ if (!(c->sb.btrees_lost_data & b)) {
+ bch_err(c, "flagging btree %s lost data", bch2_btree_id_str(btree));
+
+ mutex_lock(&c->sb_lock);
+ bch2_sb_field_get(c->disk_sb.sb, ext)->btrees_lost_data |= cpu_to_le64(b);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
}
}
/* for -o reconstruct_alloc: */
-static void do_reconstruct_alloc(struct bch_fs *c)
+static void bch2_reconstruct_alloc(struct bch_fs *c)
{
bch2_journal_log_msg(c, "dropping alloc info");
bch_info(c, "dropping and reconstructing all alloc info");
@@ -87,15 +83,17 @@ static void do_reconstruct_alloc(struct bch_fs *c)
c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
- struct journal_keys *keys = &c->journal_keys;
- size_t src, dst;
- move_gap(keys, keys->nr);
-
- for (src = 0, dst = 0; src < keys->nr; src++)
- if (!btree_id_is_alloc(keys->data[src].btree_id))
- keys->data[dst++] = keys->data[src];
- keys->nr = keys->gap = dst;
+ bch2_shoot_down_journal_keys(c, BTREE_ID_alloc,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ bch2_shoot_down_journal_keys(c, BTREE_ID_backpointers,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ bch2_shoot_down_journal_keys(c, BTREE_ID_need_discard,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ bch2_shoot_down_journal_keys(c, BTREE_ID_freespace,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ bch2_shoot_down_journal_keys(c, BTREE_ID_bucket_gens,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
}
/*
@@ -186,7 +184,7 @@ static int journal_sort_seq_cmp(const void *_l, const void *_r)
return cmp_int(l->journal_seq, r->journal_seq);
}
-static int bch2_journal_replay(struct bch_fs *c)
+int bch2_journal_replay(struct bch_fs *c)
{
struct journal_keys *keys = &c->journal_keys;
DARRAY(struct journal_key *) keys_sorted = { 0 };
@@ -194,6 +192,7 @@ static int bch2_journal_replay(struct bch_fs *c)
u64 start_seq = c->journal_replay_seq_start;
u64 end_seq = c->journal_replay_seq_start;
struct btree_trans *trans = bch2_trans_get(c);
+ bool immediate_flush = false;
int ret = 0;
if (keys->nr) {
@@ -215,6 +214,13 @@ static int bch2_journal_replay(struct bch_fs *c)
darray_for_each(*keys, k) {
cond_resched();
+ /*
+ * k->allocated means the key wasn't read in from the journal,
+ * rather it was from early repair code
+ */
+ if (k->allocated)
+ immediate_flush = true;
+
/* Skip fastpath if we're low on space in the journal */
ret = c->journal.watermark ? -1 :
commit_do(trans, NULL, NULL,
@@ -243,7 +249,10 @@ static int bch2_journal_replay(struct bch_fs *c)
struct journal_key *k = *kp;
- replay_now_at(j, k->journal_seq);
+ if (k->journal_seq)
+ replay_now_at(j, k->journal_seq);
+ else
+ replay_now_at(j, j->replay_journal_seq_end);
ret = commit_do(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc|
@@ -266,7 +275,8 @@ static int bch2_journal_replay(struct bch_fs *c)
bch2_trans_put(trans);
trans = NULL;
- if (!c->opts.keep_journal)
+ if (!c->opts.retain_recovery_info &&
+ c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay)
bch2_journal_keys_put_initial(c);
replay_now_at(j, j->replay_journal_seq_end);
@@ -274,6 +284,12 @@ static int bch2_journal_replay(struct bch_fs *c)
bch2_journal_set_replay_done(j);
+ /* if we did any repair, flush it immediately */
+ if (immediate_flush) {
+ bch2_journal_flush_all_pins(&c->journal);
+ ret = bch2_journal_meta(&c->journal);
+ }
+
if (keys->nr)
bch2_journal_log_msg(c, "journal replay finished");
err:
@@ -423,10 +439,9 @@ static int journal_replay_early(struct bch_fs *c,
static int read_btree_roots(struct bch_fs *c)
{
- unsigned i;
int ret = 0;
- for (i = 0; i < btree_id_nr_alive(c); i++) {
+ for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
if (!r->alive)
@@ -435,186 +450,46 @@ static int read_btree_roots(struct bch_fs *c)
if (btree_id_is_alloc(i) && c->opts.reconstruct_alloc)
continue;
- if (r->error) {
- __fsck_err(c,
- btree_id_is_alloc(i)
- ? FSCK_CAN_IGNORE : 0,
- btree_root_bkey_invalid,
- "invalid btree root %s",
- bch2_btree_id_str(i));
- if (i == BTREE_ID_alloc)
+ if (mustfix_fsck_err_on((ret = r->error),
+ c, btree_root_bkey_invalid,
+ "invalid btree root %s",
+ bch2_btree_id_str(i)) ||
+ mustfix_fsck_err_on((ret = r->error = bch2_btree_root_read(c, i, &r->key, r->level)),
+ c, btree_root_read_error,
+ "error reading btree root %s l=%u: %s",
+ bch2_btree_id_str(i), r->level, bch2_err_str(ret))) {
+ if (btree_id_is_alloc(i)) {
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
- }
+ r->error = 0;
+ } else if (!(c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
+ bch_info(c, "will run btree node scan");
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+ }
- ret = bch2_btree_root_read(c, i, &r->key, r->level);
- if (ret) {
- fsck_err(c,
- btree_root_read_error,
- "error reading btree root %s",
- bch2_btree_id_str(i));
- if (btree_id_is_alloc(i))
- c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
ret = 0;
+ bch2_btree_lost_data(c, i);
}
}
- for (i = 0; i < BTREE_ID_NR; i++) {
+ for (unsigned i = 0; i < BTREE_ID_NR; i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
- if (!r->b) {
+ if (!r->b && !r->error) {
r->alive = false;
r->level = 0;
- bch2_btree_root_alloc(c, i);
+ bch2_btree_root_alloc_fake(c, i, 0);
}
}
fsck_err:
return ret;
}
-static int bch2_initialize_subvolumes(struct bch_fs *c)
-{
- struct bkey_i_snapshot_tree root_tree;
- struct bkey_i_snapshot root_snapshot;
- struct bkey_i_subvolume root_volume;
- int ret;
-
- bkey_snapshot_tree_init(&root_tree.k_i);
- root_tree.k.p.offset = 1;
- root_tree.v.master_subvol = cpu_to_le32(1);
- root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);
-
- bkey_snapshot_init(&root_snapshot.k_i);
- root_snapshot.k.p.offset = U32_MAX;
- root_snapshot.v.flags = 0;
- root_snapshot.v.parent = 0;
- root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
- root_snapshot.v.tree = cpu_to_le32(1);
- SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
-
- bkey_subvolume_init(&root_volume.k_i);
- root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
- root_volume.v.flags = 0;
- root_volume.v.snapshot = cpu_to_le32(U32_MAX);
- root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
-
- ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
- bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
- bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_inode_unpacked inode;
- int ret;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
- ret = bkey_err(k);
- if (ret)
- return ret;
-
- if (!bkey_is_inode(k.k)) {
- bch_err(trans->c, "root inode not found");
- ret = -BCH_ERR_ENOENT_inode;
- goto err;
- }
-
- ret = bch2_inode_unpack(k, &inode);
- BUG_ON(ret);
-
- inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
-
- ret = bch2_inode_write(trans, &iter, &inode);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-/* set bi_subvol on root inode */
-noinline_for_stack
-static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
-{
- int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
- __bch2_fs_upgrade_for_subvolumes(trans));
- bch_err_fn(c, ret);
- return ret;
-}
-
-const char * const bch2_recovery_passes[] = {
-#define x(_fn, ...) #_fn,
- BCH_RECOVERY_PASSES()
-#undef x
- NULL
-};
-
-static int bch2_check_allocations(struct bch_fs *c)
-{
- return bch2_gc(c, true, c->opts.norecovery);
-}
-
-static int bch2_set_may_go_rw(struct bch_fs *c)
-{
- struct journal_keys *keys = &c->journal_keys;
-
- /*
- * After we go RW, the journal keys buffer can't be modified (except for
- * setting journal_key->overwritten: it will be accessed by multiple
- * threads
- */
- move_gap(keys, keys->nr);
-
- set_bit(BCH_FS_may_go_rw, &c->flags);
-
- if (keys->nr || c->opts.fsck || !c->sb.clean)
- return bch2_fs_read_write_early(c);
- return 0;
-}
-
-struct recovery_pass_fn {
- int (*fn)(struct bch_fs *);
- unsigned when;
-};
-
-static struct recovery_pass_fn recovery_pass_fns[] = {
-#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
- BCH_RECOVERY_PASSES()
-#undef x
-};
-
-u64 bch2_recovery_passes_to_stable(u64 v)
-{
- static const u8 map[] = {
-#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
- BCH_RECOVERY_PASSES()
-#undef x
- };
-
- u64 ret = 0;
- for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
- if (v & BIT_ULL(i))
- ret |= BIT_ULL(map[i]);
- return ret;
-}
-
-u64 bch2_recovery_passes_from_stable(u64 v)
-{
- static const u8 map[] = {
-#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
- BCH_RECOVERY_PASSES()
-#undef x
- };
-
- u64 ret = 0;
- for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
- if (v & BIT_ULL(i))
- ret |= BIT_ULL(map[i]);
- return ret;
-}
-
static bool check_version_upgrade(struct bch_fs *c)
{
unsigned latest_version = bcachefs_metadata_version_current;
@@ -687,96 +562,6 @@ static bool check_version_upgrade(struct bch_fs *c)
return false;
}
-u64 bch2_fsck_recovery_passes(void)
-{
- u64 ret = 0;
-
- for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
- if (recovery_pass_fns[i].when & PASS_FSCK)
- ret |= BIT_ULL(i);
- return ret;
-}
-
-static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
-{
- struct recovery_pass_fn *p = recovery_pass_fns + pass;
-
- if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read)
- return false;
- if (c->recovery_passes_explicit & BIT_ULL(pass))
- return true;
- if ((p->when & PASS_FSCK) && c->opts.fsck)
- return true;
- if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
- return true;
- if (p->when & PASS_ALWAYS)
- return true;
- return false;
-}
-
-static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
-{
- struct recovery_pass_fn *p = recovery_pass_fns + pass;
- int ret;
-
- if (!(p->when & PASS_SILENT))
- bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
- bch2_recovery_passes[pass]);
- ret = p->fn(c);
- if (ret)
- return ret;
- if (!(p->when & PASS_SILENT))
- bch2_print(c, KERN_CONT " done\n");
-
- return 0;
-}
-
-static int bch2_run_recovery_passes(struct bch_fs *c)
-{
- int ret = 0;
-
- while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
- if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
- unsigned pass = c->curr_recovery_pass;
-
- ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
- if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
- (ret && c->curr_recovery_pass < pass))
- continue;
- if (ret)
- break;
-
- c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
- }
- c->curr_recovery_pass++;
- c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);
- }
-
- return ret;
-}
-
-int bch2_run_online_recovery_passes(struct bch_fs *c)
-{
- int ret = 0;
-
- for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
- struct recovery_pass_fn *p = recovery_pass_fns + i;
-
- if (!(p->when & PASS_ONLINE))
- continue;
-
- ret = bch2_run_recovery_pass(c, i);
- if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
- i = c->curr_recovery_pass;
- continue;
- }
- if (ret)
- break;
- }
-
- return ret;
-}
-
int bch2_fs_recovery(struct bch_fs *c)
{
struct bch_sb_field_clean *clean = NULL;
@@ -809,24 +594,14 @@ int bch2_fs_recovery(struct bch_fs *c)
goto err;
}
- if (c->opts.fsck && c->opts.norecovery) {
- bch_err(c, "cannot select both norecovery and fsck");
- ret = -EINVAL;
- goto err;
- }
+ if (c->opts.norecovery)
+ c->opts.recovery_pass_last = BCH_RECOVERY_PASS_journal_replay - 1;
if (!c->opts.nochanges) {
mutex_lock(&c->sb_lock);
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
bool write_sb = false;
- struct bch_sb_field_ext *ext =
- bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
- if (!ext) {
- ret = -BCH_ERR_ENOSPC_sb;
- mutex_unlock(&c->sb_lock);
- goto err;
- }
-
if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
ext->recovery_passes_required[0] |=
cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
@@ -885,7 +660,7 @@ int bch2_fs_recovery(struct bch_fs *c)
goto err;
}
- if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
+ if (!c->sb.clean || c->opts.fsck || c->opts.retain_recovery_info) {
struct genradix_iter iter;
struct journal_replay **i;
@@ -965,7 +740,7 @@ use_clean:
c->journal_replay_seq_end = blacklist_seq - 1;
if (c->opts.reconstruct_alloc)
- do_reconstruct_alloc(c);
+ bch2_reconstruct_alloc(c);
zero_out_btree_mem_ptr(&c->journal_keys);
@@ -1017,6 +792,12 @@ use_clean:
clear_bit(BCH_FS_fsck_running, &c->flags);
+ /* fsync if we fixed errors */
+ if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
+ bch2_journal_flush_all_pins(&c->journal);
+ bch2_journal_meta(&c->journal);
+ }
+
/* If we fixed errors, verify that fs is actually clean now: */
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
test_bit(BCH_FS_errors_fixed, &c->flags) &&
@@ -1051,6 +832,7 @@ use_clean:
}
mutex_lock(&c->sb_lock);
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
bool write_sb = false;
if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
@@ -1064,15 +846,18 @@ use_clean:
write_sb = true;
}
- if (!test_bit(BCH_FS_error, &c->flags)) {
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
- if (ext &&
- (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) ||
- !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent)))) {
- memset(ext->recovery_passes_required, 0, sizeof(ext->recovery_passes_required));
- memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
- write_sb = true;
- }
+ if (!test_bit(BCH_FS_error, &c->flags) &&
+ !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent))) {
+ memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
+ write_sb = true;
+ }
+
+ if (c->opts.fsck &&
+ !test_bit(BCH_FS_error, &c->flags) &&
+ c->recovery_pass_done == BCH_RECOVERY_PASS_NR - 1 &&
+ ext->btrees_lost_data) {
+ ext->btrees_lost_data = 0;
+ write_sb = true;
}
if (c->opts.fsck &&
@@ -1113,9 +898,10 @@ use_clean:
out:
bch2_flush_fsck_errs(c);
- if (!c->opts.keep_journal &&
- test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
+ if (!c->opts.retain_recovery_info) {
bch2_journal_keys_put_initial(c);
+ bch2_find_btree_nodes_exit(&c->found_btree_nodes);
+ }
kfree(clean);
if (!ret &&
@@ -1141,6 +927,7 @@ int bch2_fs_initialize(struct bch_fs *c)
int ret;
bch_notice(c, "initializing new filesystem");
+ set_bit(BCH_FS_new_fs, &c->flags);
mutex_lock(&c->sb_lock);
c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
@@ -1155,11 +942,11 @@ int bch2_fs_initialize(struct bch_fs *c)
}
mutex_unlock(&c->sb_lock);
- c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
+ c->curr_recovery_pass = BCH_RECOVERY_PASS_NR;
set_bit(BCH_FS_may_go_rw, &c->flags);
for (unsigned i = 0; i < BTREE_ID_NR; i++)
- bch2_btree_root_alloc(c, i);
+ bch2_btree_root_alloc_fake(c, i, 0);
for_each_member_device(c, ca)
bch2_dev_usage_init(ca);
@@ -1230,7 +1017,7 @@ int bch2_fs_initialize(struct bch_fs *c)
if (ret)
goto err;
- c->recovery_pass_done = ARRAY_SIZE(recovery_pass_fns) - 1;
+ c->recovery_pass_done = BCH_RECOVERY_PASS_NR - 1;
if (enabled_qtypes(c)) {
ret = bch2_fs_quota_read(c);
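
Two threads run through the recovery.c rewrite: per-btree damage is now recorded persistently (bch2_btree_lost_data() sets BIT_ULL(btree) in the superblock's btrees_lost_data field, cleared only after a fully clean fsck), and -o norecovery is reduced to recovery_pass_last = journal_replay - 1, i.e. "stop before replay". A sketch of the one-bit-per-btree bookkeeping (userspace model of the sb field):

#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

static uint64_t btrees_lost_data;	/* stands in for the sb field */

static void flag_btree_lost(unsigned btree_id)
{
	btrees_lost_data |= BIT_ULL(btree_id);	/* persists once set */
}

static int btree_lost(unsigned btree_id)
{
	return (btrees_lost_data & BIT_ULL(btree_id)) != 0;
}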
diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h
index 4e9d24719b2e..4bf818de1f2f 100644
--- a/fs/bcachefs/recovery.h
+++ b/fs/bcachefs/recovery.h
@@ -2,37 +2,9 @@
#ifndef _BCACHEFS_RECOVERY_H
#define _BCACHEFS_RECOVERY_H
-extern const char * const bch2_recovery_passes[];
+void bch2_btree_lost_data(struct bch_fs *, enum btree_id);
-u64 bch2_recovery_passes_to_stable(u64 v);
-u64 bch2_recovery_passes_from_stable(u64 v);
-
-/*
- * For when we need to rewind recovery passes and run a pass we skipped:
- */
-static inline int bch2_run_explicit_recovery_pass(struct bch_fs *c,
- enum bch_recovery_pass pass)
-{
- if (c->recovery_passes_explicit & BIT_ULL(pass))
- return 0;
-
- bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
- bch2_recovery_passes[pass], pass,
- bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
-
- c->recovery_passes_explicit |= BIT_ULL(pass);
-
- if (c->curr_recovery_pass >= pass) {
- c->curr_recovery_pass = pass;
- c->recovery_passes_complete &= (1ULL << pass) >> 1;
- return -BCH_ERR_restart_recovery;
- } else {
- return 0;
- }
-}
-
-int bch2_run_online_recovery_passes(struct bch_fs *);
-u64 bch2_fsck_recovery_passes(void);
+int bch2_journal_replay(struct bch_fs *);
int bch2_fs_recovery(struct bch_fs *);
int bch2_fs_initialize(struct bch_fs *);
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
new file mode 100644
index 000000000000..0cec0f7d9703
--- /dev/null
+++ b/fs/bcachefs/recovery_passes.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "backpointers.h"
+#include "btree_gc.h"
+#include "btree_node_scan.h"
+#include "ec.h"
+#include "fsck.h"
+#include "inode.h"
+#include "journal.h"
+#include "lru.h"
+#include "logged_ops.h"
+#include "rebalance.h"
+#include "recovery.h"
+#include "recovery_passes.h"
+#include "snapshot.h"
+#include "subvolume.h"
+#include "super.h"
+#include "super-io.h"
+
+const char * const bch2_recovery_passes[] = {
+#define x(_fn, ...) #_fn,
+ BCH_RECOVERY_PASSES()
+#undef x
+ NULL
+};
+
+static int bch2_check_allocations(struct bch_fs *c)
+{
+ return bch2_gc(c, true, false);
+}
+
+static int bch2_set_may_go_rw(struct bch_fs *c)
+{
+ struct journal_keys *keys = &c->journal_keys;
+
+ /*
+ * After we go RW, the journal keys buffer can't be modified (except for
+ * setting journal_key->overwritten: it will be accessed by multiple
+ * threads
+ */
+ move_gap(keys, keys->nr);
+
+ set_bit(BCH_FS_may_go_rw, &c->flags);
+
+ if (keys->nr || c->opts.fsck || !c->sb.clean || c->recovery_passes_explicit)
+ return bch2_fs_read_write_early(c);
+ return 0;
+}
+
+struct recovery_pass_fn {
+ int (*fn)(struct bch_fs *);
+ unsigned when;
+};
+
+static struct recovery_pass_fn recovery_pass_fns[] = {
+#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
+ BCH_RECOVERY_PASSES()
+#undef x
+};
+
+static const u8 passes_to_stable_map[] = {
+#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
+ BCH_RECOVERY_PASSES()
+#undef x
+};
+
+static enum bch_recovery_pass_stable bch2_recovery_pass_to_stable(enum bch_recovery_pass pass)
+{
+ return passes_to_stable_map[pass];
+}
+
+u64 bch2_recovery_passes_to_stable(u64 v)
+{
+ u64 ret = 0;
+ for (unsigned i = 0; i < ARRAY_SIZE(passes_to_stable_map); i++)
+ if (v & BIT_ULL(i))
+ ret |= BIT_ULL(passes_to_stable_map[i]);
+ return ret;
+}
+
+u64 bch2_recovery_passes_from_stable(u64 v)
+{
+ static const u8 map[] = {
+#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
+ BCH_RECOVERY_PASSES()
+#undef x
+ };
+
+ u64 ret = 0;
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
+ if (v & BIT_ULL(i))
+ ret |= BIT_ULL(map[i]);
+ return ret;
+}
+
+/*
+ * For when we need to rewind recovery passes and run a pass we skipped:
+ */
+int bch2_run_explicit_recovery_pass(struct bch_fs *c,
+ enum bch_recovery_pass pass)
+{
+ if (c->recovery_passes_explicit & BIT_ULL(pass))
+ return 0;
+
+ bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
+ bch2_recovery_passes[pass], pass,
+ bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
+
+ c->recovery_passes_explicit |= BIT_ULL(pass);
+
+ if (c->curr_recovery_pass >= pass) {
+ c->curr_recovery_pass = pass;
+ c->recovery_passes_complete &= (1ULL << pass) >> 1;
+ return -BCH_ERR_restart_recovery;
+ } else {
+ return 0;
+ }
+}
+
+int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c,
+ enum bch_recovery_pass pass)
+{
+ enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);
+
+ mutex_lock(&c->sb_lock);
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
+
+ if (!test_bit_le64(s, ext->recovery_passes_required)) {
+ __set_bit_le64(s, ext->recovery_passes_required);
+ bch2_write_super(c);
+ }
+ mutex_unlock(&c->sb_lock);
+
+ return bch2_run_explicit_recovery_pass(c, pass);
+}
+
+static void bch2_clear_recovery_pass_required(struct bch_fs *c,
+ enum bch_recovery_pass pass)
+{
+ enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);
+
+ mutex_lock(&c->sb_lock);
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
+
+ if (test_bit_le64(s, ext->recovery_passes_required)) {
+ __clear_bit_le64(s, ext->recovery_passes_required);
+ bch2_write_super(c);
+ }
+ mutex_unlock(&c->sb_lock);
+}
+
+u64 bch2_fsck_recovery_passes(void)
+{
+ u64 ret = 0;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
+ if (recovery_pass_fns[i].when & PASS_FSCK)
+ ret |= BIT_ULL(i);
+ return ret;
+}
+
+static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
+{
+ struct recovery_pass_fn *p = recovery_pass_fns + pass;
+
+ if (c->recovery_passes_explicit & BIT_ULL(pass))
+ return true;
+ if ((p->when & PASS_FSCK) && c->opts.fsck)
+ return true;
+ if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
+ return true;
+ if (p->when & PASS_ALWAYS)
+ return true;
+ return false;
+}
+
+static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
+{
+ struct recovery_pass_fn *p = recovery_pass_fns + pass;
+ int ret;
+
+ if (!(p->when & PASS_SILENT))
+ bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
+ bch2_recovery_passes[pass]);
+ ret = p->fn(c);
+ if (ret)
+ return ret;
+ if (!(p->when & PASS_SILENT))
+ bch2_print(c, KERN_CONT " done\n");
+
+ return 0;
+}
+
+int bch2_run_online_recovery_passes(struct bch_fs *c)
+{
+ int ret = 0;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
+ struct recovery_pass_fn *p = recovery_pass_fns + i;
+
+ if (!(p->when & PASS_ONLINE))
+ continue;
+
+ ret = bch2_run_recovery_pass(c, i);
+ if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
+ i = c->curr_recovery_pass;
+ continue;
+ }
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int bch2_run_recovery_passes(struct bch_fs *c)
+{
+ int ret = 0;
+
+ while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
+ if (c->opts.recovery_pass_last &&
+ c->curr_recovery_pass > c->opts.recovery_pass_last)
+ break;
+
+ if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
+ unsigned pass = c->curr_recovery_pass;
+
+ ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
+ if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
+ (ret && c->curr_recovery_pass < pass))
+ continue;
+ if (ret)
+ break;
+
+ c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
+ }
+
+ c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);
+
+ if (!test_bit(BCH_FS_error, &c->flags))
+ bch2_clear_recovery_pass_required(c, c->curr_recovery_pass);
+
+ c->curr_recovery_pass++;
+ }
+
+ return ret;
+}
diff --git a/fs/bcachefs/recovery_passes.h b/fs/bcachefs/recovery_passes.h
new file mode 100644
index 000000000000..99b464e127b8
--- /dev/null
+++ b/fs/bcachefs/recovery_passes.h
@@ -0,0 +1,17 @@
+#ifndef _BCACHEFS_RECOVERY_PASSES_H
+#define _BCACHEFS_RECOVERY_PASSES_H
+
+extern const char * const bch2_recovery_passes[];
+
+u64 bch2_recovery_passes_to_stable(u64 v);
+u64 bch2_recovery_passes_from_stable(u64 v);
+
+u64 bch2_fsck_recovery_passes(void);
+
+int bch2_run_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass);
+int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *, enum bch_recovery_pass);
+
+int bch2_run_online_recovery_passes(struct bch_fs *);
+int bch2_run_recovery_passes(struct bch_fs *);
+
+#endif /* _BCACHEFS_RECOVERY_PASSES_H */
diff --git a/fs/bcachefs/recovery_types.h b/fs/bcachefs/recovery_passes_types.h
index 4959e95e7c74..773aea9a0080 100644
--- a/fs/bcachefs/recovery_types.h
+++ b/fs/bcachefs/recovery_passes_types.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_RECOVERY_TYPES_H
-#define _BCACHEFS_RECOVERY_TYPES_H
+#ifndef _BCACHEFS_RECOVERY_PASSES_TYPES_H
+#define _BCACHEFS_RECOVERY_PASSES_TYPES_H
#define PASS_SILENT BIT(0)
#define PASS_FSCK BIT(1)
@@ -13,6 +13,7 @@
* must never change:
*/
#define BCH_RECOVERY_PASSES() \
+ x(scan_for_btree_nodes, 37, 0) \
x(check_topology, 4, 0) \
x(alloc_read, 0, PASS_ALWAYS) \
x(stripes_read, 1, PASS_ALWAYS) \
@@ -31,13 +32,13 @@
x(check_alloc_to_lru_refs, 15, PASS_ONLINE|PASS_FSCK) \
x(fs_freespace_init, 16, PASS_ALWAYS|PASS_SILENT) \
x(bucket_gens_init, 17, 0) \
+ x(reconstruct_snapshots, 38, 0) \
x(check_snapshot_trees, 18, PASS_ONLINE|PASS_FSCK) \
x(check_snapshots, 19, PASS_ONLINE|PASS_FSCK) \
x(check_subvols, 20, PASS_ONLINE|PASS_FSCK) \
x(check_subvol_children, 35, PASS_ONLINE|PASS_FSCK) \
x(delete_dead_snapshots, 21, PASS_ONLINE|PASS_FSCK) \
x(fs_upgrade_for_subvolumes, 22, 0) \
- x(resume_logged_ops, 23, PASS_ALWAYS) \
x(check_inodes, 24, PASS_FSCK) \
x(check_extents, 25, PASS_FSCK) \
x(check_indirect_extents, 26, PASS_FSCK) \
@@ -47,6 +48,7 @@
x(check_subvolume_structure, 36, PASS_ONLINE|PASS_FSCK) \
x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \
x(check_nlinks, 31, PASS_FSCK) \
+ x(resume_logged_ops, 23, PASS_ALWAYS) \
x(delete_dead_inodes, 32, PASS_FSCK|PASS_UNCLEAN) \
x(fix_reflink_p, 33, 0) \
x(set_fs_needs_rebalance, 34, 0) \
@@ -56,6 +58,7 @@ enum bch_recovery_pass {
#define x(n, id, when) BCH_RECOVERY_PASS_##n,
BCH_RECOVERY_PASSES()
#undef x
+ BCH_RECOVERY_PASS_NR
};
/* But we also need stable identifiers that can be used in the superblock */
@@ -65,4 +68,4 @@ enum bch_recovery_pass_stable {
#undef x
};
-#endif /* _BCACHEFS_RECOVERY_TYPES_H */
+#endif /* _BCACHEFS_RECOVERY_PASSES_TYPES_H */
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index c47c66c2b394..ff7864731a07 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -185,8 +185,7 @@ not_found:
} else {
bkey_error_init(update);
update->k.p = p.k->p;
- update->k.p.offset = next_idx;
- update->k.size = next_idx - *idx;
+ update->k.size = p.k->size;
set_bkey_val_u64s(&update->k, 0);
}
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index cc2672c12031..678b9c20e251 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -6,12 +6,15 @@
#include "replicas.h"
#include "super-io.h"
+#include <linux/sort.h>
+
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
struct bch_replicas_cpu *);
/* Some (buggy!) compilers don't allow memcmp to be passed as a pointer */
-static int bch2_memcmp(const void *l, const void *r, size_t size)
+static int bch2_memcmp(const void *l, const void *r, const void *priv)
{
+ size_t size = (size_t) priv;
return memcmp(l, r, size);
}
@@ -39,7 +42,8 @@ void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
- eytzinger0_sort(r->entries, r->nr, r->entry_size, bch2_memcmp, NULL);
+ eytzinger0_sort_r(r->entries, r->nr, r->entry_size,
+ bch2_memcmp, NULL, (void *)(size_t)r->entry_size);
}
static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
@@ -228,7 +232,7 @@ static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
verify_replicas_entry(search);
-#define entry_cmp(_l, _r, size) memcmp(_l, _r, entry_size)
+#define entry_cmp(_l, _r) memcmp(_l, _r, entry_size)
idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
entry_cmp, search);
#undef entry_cmp
@@ -824,10 +828,11 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
{
unsigned i;
- sort_cmp_size(cpu_r->entries,
- cpu_r->nr,
- cpu_r->entry_size,
- bch2_memcmp, NULL);
+ sort_r(cpu_r->entries,
+ cpu_r->nr,
+ cpu_r->entry_size,
+ bch2_memcmp, NULL,
+ (void *)(size_t)cpu_r->entry_size);
for (i = 0; i < cpu_r->nr; i++) {
struct bch_replicas_entry_v1 *e =
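The comparator above now receives the element size through sort_r()'s opaque priv argument instead of a third size parameter. A minimal userspace sketch of the same priv-carrying comparator pattern (hand-rolled insertion sort, since libc qsort_r() signatures vary between platforms):

    #include <stdio.h>
    #include <string.h>

    static int memcmp_r(const void *l, const void *r, const void *priv)
    {
        return memcmp(l, r, (size_t) priv);
    }

    /* minimal sort taking a priv-carrying comparator, sort_r()-style: */
    static void sort_r_(void *base, size_t n, size_t size,
                        int (*cmp)(const void *, const void *, const void *),
                        const void *priv)
    {
        char tmp[64]; /* sketch assumes size <= 64 */

        for (size_t i = 1; i < n; i++) {
            size_t j = i;

            memcpy(tmp, (char *) base + i * size, size);
            while (j && cmp((char *) base + (j - 1) * size, tmp, priv) > 0) {
                memcpy((char *) base + j * size,
                       (char *) base + (j - 1) * size, size);
                j--;
            }
            memcpy((char *) base + j * size, tmp, size);
        }
    }

    int main(void)
    {
        unsigned char entries[4][4] = { { 3 }, { 1 }, { 2 }, { 0 } };

        /* element size smuggled through priv, as in bch2_memcmp() above: */
        sort_r_(entries, 4, 4, memcmp_r, (void *) (size_t) 4);

        for (int i = 0; i < 4; i++)
            printf("%u ", entries[i][0]);
        printf("\n");
        return 0;
    }

This prints "0 1 2 3"; the (void *)(size_t) cast round-trip is the same trick the patch uses to thread entry_size into bch2_memcmp().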
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
index 5980ba2563fe..35ca3f138de6 100644
--- a/fs/bcachefs/sb-clean.c
+++ b/fs/bcachefs/sb-clean.c
@@ -29,6 +29,14 @@ int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *cle
for (entry = clean->start;
entry < (struct jset_entry *) vstruct_end(&clean->field);
entry = vstruct_next(entry)) {
+ if (vstruct_end(entry) > vstruct_end(&clean->field)) {
+ bch_err(c, "journal entry (u64s %u) overran end of superblock clean section (u64s %u) by %zu",
+ le16_to_cpu(entry->u64s), le32_to_cpu(clean->field.u64s),
+ (u64 *) vstruct_end(entry) - (u64 *) vstruct_end(&clean->field));
+ bch2_sb_error_count(c, BCH_FSCK_ERR_sb_clean_entry_overrun);
+ return -BCH_ERR_fsck_repair_unimplemented;
+ }
+
ret = bch2_journal_entry_validate(c, NULL, entry,
le16_to_cpu(c->disk_sb.sb->version),
BCH_SB_BIG_ENDIAN(c->disk_sb.sb),
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index e4396cb0bacb..a98ef940b7a3 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -7,7 +7,7 @@
#include "bcachefs.h"
#include "darray.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "sb-downgrade.h"
#include "sb-errors.h"
#include "super-io.h"
@@ -51,7 +51,10 @@
BCH_FSCK_ERR_subvol_fs_path_parent_wrong) \
x(btree_subvolume_children, \
BIT_ULL(BCH_RECOVERY_PASS_check_subvols), \
- BCH_FSCK_ERR_subvol_children_not_set)
+ BCH_FSCK_ERR_subvol_children_not_set) \
+ x(mi_btree_bitmap, \
+ BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
+ BCH_FSCK_ERR_btree_bitmap_not_marked)
#define DOWNGRADE_TABLE()
diff --git a/fs/bcachefs/sb-errors_types.h b/fs/bcachefs/sb-errors_types.h
index 5178bf579f7c..06c7a644f4a4 100644
--- a/fs/bcachefs/sb-errors_types.h
+++ b/fs/bcachefs/sb-errors_types.h
@@ -130,7 +130,7 @@
x(bucket_gens_nonzero_for_invalid_buckets, 122) \
x(need_discard_freespace_key_to_invalid_dev_bucket, 123) \
x(need_discard_freespace_key_bad, 124) \
- x(backpointer_pos_wrong, 125) \
+ x(backpointer_bucket_offset_wrong, 125) \
x(backpointer_to_missing_device, 126) \
x(backpointer_to_missing_alloc, 127) \
x(backpointer_to_missing_ptr, 128) \
@@ -265,7 +265,14 @@
x(subvol_children_bad, 257) \
x(subvol_loop, 258) \
x(subvol_unreachable, 259) \
- x(btree_node_bkey_bad_u64s, 260)
+ x(btree_node_bkey_bad_u64s, 260) \
+ x(btree_node_topology_empty_interior_node, 261) \
+ x(btree_ptr_v2_min_key_bad, 262) \
+ x(btree_root_unreadable_and_scan_found_nothing, 263) \
+ x(snapshot_node_missing, 264) \
+ x(dup_backpointer_to_bad_csum_extent, 265) \
+ x(btree_bitmap_not_marked, 266) \
+ x(sb_clean_entry_overrun, 267)
enum bch_sb_error_id {
#define x(t, n) BCH_FSCK_ERR_##t = n,
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
index eff5ce18c69c..5b8e621ac5eb 100644
--- a/fs/bcachefs/sb-members.c
+++ b/fs/bcachefs/sb-members.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "btree_cache.h"
#include "disk_groups.h"
#include "opts.h"
#include "replicas.h"
@@ -426,3 +427,55 @@ void bch2_dev_errors_reset(struct bch_dev *ca)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
+
+/*
+ * Per member "range has btree nodes" bitmap:
+ *
+ * This is so that if we ever have to run the btree node scan for repair, we
+ * don't have to scan entire devices:
+ */
+
+bool bch2_dev_btree_bitmap_marked(struct bch_fs *c, struct bkey_s_c k)
+{
+ bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr)
+ if (!bch2_dev_btree_bitmap_marked_sectors(bch_dev_bkey_exists(c, ptr->dev),
+ ptr->offset, btree_sectors(c)))
+ return false;
+ return true;
+}
+
+static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, unsigned dev,
+ u64 start, unsigned sectors)
+{
+ struct bch_member *m = __bch2_members_v2_get_mut(mi, dev);
+ u64 bitmap = le64_to_cpu(m->btree_allocated_bitmap);
+
+ u64 end = start + sectors;
+
+ int resize = ilog2(roundup_pow_of_two(end)) - (m->btree_bitmap_shift + 6);
+ if (resize > 0) {
+ u64 new_bitmap = 0;
+
+ for (unsigned i = 0; i < 64; i++)
+ if (bitmap & BIT_ULL(i))
+ new_bitmap |= BIT_ULL(i >> resize);
+ bitmap = new_bitmap;
+ m->btree_bitmap_shift += resize;
+ }
+
+ for (unsigned bit = start >> m->btree_bitmap_shift;
+ (u64) bit << m->btree_bitmap_shift < end;
+ bit++)
+ bitmap |= BIT_ULL(bit);
+
+ m->btree_allocated_bitmap = cpu_to_le64(bitmap);
+}
+
+void bch2_dev_btree_bitmap_mark(struct bch_fs *c, struct bkey_s_c k)
+{
+ lockdep_assert_held(&c->sb_lock);
+
+ struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
+ bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr)
+ __bch2_dev_btree_bitmap_mark(mi, ptr->dev, ptr->offset, btree_sectors(c));
+}
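For context on the encoding: the bitmap has 64 bits, each covering 2^btree_bitmap_shift sectors, so the whole thing reaches 2^(btree_bitmap_shift + 6) sectors; when a marked range extends past that, the shift grows and existing bits are folded down to the coarser granularity. A minimal userspace sketch of the fold-and-mark logic above (struct and function names here are illustrative, not the kernel API):

    #include <stdint.h>
    #include <stdio.h>

    struct member_bitmap {
        unsigned shift;  /* log2(sectors covered per bit) */
        uint64_t bits;
    };

    static void mark_range(struct member_bitmap *m, uint64_t start, uint64_t sectors)
    {
        uint64_t end = start + sectors;

        /*
         * Grow granularity until 64 bits reach past @end, folding old bits
         * down (one doubling per iteration; the kernel computes the full
         * resize in one step with ilog2):
         */
        while (end > 64ULL << m->shift) {
            uint64_t folded = 0;

            for (unsigned i = 0; i < 64; i++)
                if (m->bits & (1ULL << i))
                    folded |= 1ULL << (i >> 1);
            m->bits = folded;
            m->shift++;
        }

        for (uint64_t bit = start >> m->shift;
             bit << m->shift < end;
             bit++)
            m->bits |= 1ULL << bit;
    }

    int main(void)
    {
        struct member_bitmap m = { 0, 0 };

        mark_range(&m, 8, 16);   /* sectors 8..23 at 1 sector/bit */
        mark_range(&m, 200, 8);  /* forces two doublings, then marks */
        printf("shift=%u bits=%#llx\n", m.shift, (unsigned long long) m.bits);
        return 0;
    }

The false-positive direction is safe here: a folded bit may cover sectors that never held btree nodes, which only makes a future scan look at slightly more of the device.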
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index be0a94183271..5efa64eca5f8 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -3,6 +3,7 @@
#define _BCACHEFS_SB_MEMBERS_H
#include "darray.h"
+#include "bkey_types.h"
extern char * const bch2_member_error_strs[];
@@ -220,6 +221,8 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
: 1,
.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
.valid = bch2_member_exists(mi),
+ .btree_bitmap_shift = mi->btree_bitmap_shift,
+ .btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
};
}
@@ -228,4 +231,22 @@ void bch2_sb_members_from_cpu(struct bch_fs *);
void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);
+static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
+{
+ u64 end = start + sectors;
+
+ if (end > 64ULL << ca->mi.btree_bitmap_shift)
+ return false;
+
+ for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
+ (u64) bit << ca->mi.btree_bitmap_shift < end;
+ bit++)
+ if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
+ return false;
+ return true;
+}
+
+bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
+void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);
+
#endif /* _BCACHEFS_SB_MEMBERS_H */
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 39debe814bf3..544322d5c251 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -8,6 +8,7 @@
#include "errcode.h"
#include "error.h"
#include "fs.h"
+#include "recovery_passes.h"
#include "snapshot.h"
#include <linux/random.h>
@@ -93,8 +94,10 @@ static int bch2_snapshot_tree_create(struct btree_trans *trans,
static bool __bch2_snapshot_is_ancestor_early(struct snapshot_table *t, u32 id, u32 ancestor)
{
- while (id && id < ancestor)
- id = __snapshot_t(t, id)->parent;
+ while (id && id < ancestor) {
+ const struct snapshot_t *s = __snapshot_t(t, id);
+ id = s ? s->parent : 0;
+ }
return id == ancestor;
}
@@ -110,6 +113,8 @@ static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancest
static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
{
const struct snapshot_t *s = __snapshot_t(t, id);
+ if (!s)
+ return 0;
if (s->skip[2] <= ancestor)
return s->skip[2];
@@ -120,6 +125,15 @@ static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ances
return s->parent;
}
+static bool test_ancestor_bitmap(struct snapshot_table *t, u32 id, u32 ancestor)
+{
+ const struct snapshot_t *s = __snapshot_t(t, id);
+ if (!s)
+ return false;
+
+ return test_bit(ancestor - id - 1, s->is_ancestor);
+}
+
bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
bool ret;
@@ -127,7 +141,7 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
rcu_read_lock();
struct snapshot_table *t = rcu_dereference(c->snapshots);
- if (unlikely(c->recovery_pass_done <= BCH_RECOVERY_PASS_check_snapshots)) {
+ if (unlikely(c->recovery_pass_done < BCH_RECOVERY_PASS_check_snapshots)) {
ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor);
goto out;
}
@@ -135,13 +149,11 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
while (id && id < ancestor - IS_ANCESTOR_BITMAP)
id = get_ancestor_below(t, id, ancestor);
- if (id && id < ancestor) {
- ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);
+ ret = id && id < ancestor
+ ? test_ancestor_bitmap(t, id, ancestor)
+ : id == ancestor;
- EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
- } else {
- ret = id == ancestor;
- }
+ EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
out:
rcu_read_unlock();
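For context, each snapshot node caches an is_ancestor bitmap for the IS_ANCESTOR_BITMAP ids immediately above it, so the walk only follows skip/parent pointers until the target id is within bitmap range. A rough userspace model of the lookup (plain parent pointers instead of the skip list, and a table indexed directly by id — both simplifications):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IS_ANCESTOR_BITMAP 128
    #define MAX_ID 256

    struct snap {
        uint32_t parent;                                 /* 0 == no parent */
        uint64_t is_ancestor[IS_ANCESTOR_BITMAP / 64];   /* bit n: id + n + 1 is an ancestor */
    };

    static struct snap table[MAX_ID];

    static bool bitmap_test(const uint64_t *map, unsigned bit)
    {
        return map[bit / 64] & (1ULL << (bit % 64));
    }

    static bool is_ancestor(uint32_t id, uint32_t ancestor)
    {
        /* walk up until @ancestor is within bitmap range of @id: */
        while (id && id + IS_ANCESTOR_BITMAP < ancestor)
            id = table[id].parent;

        return id && id < ancestor
            ? bitmap_test(table[id].is_ancestor, ancestor - id - 1)
            : id == ancestor;
    }

    int main(void)
    {
        /* chain 10 -> 20 -> 30, with ancestor bits filled in: */
        table[10].parent = 20;
        table[20].parent = 30;
        table[10].is_ancestor[0] |= 1ULL << (20 - 10 - 1);
        table[10].is_ancestor[0] |= 1ULL << (30 - 10 - 1);
        table[20].is_ancestor[0] |= 1ULL << (30 - 20 - 1);

        printf("%d %d %d\n", is_ancestor(10, 30), is_ancestor(10, 10), is_ancestor(20, 10));
        return 0;
    }

This prints "1 1 0". The NULL checks added by the patch matter precisely because __snapshot_t() can now return NULL for ids past the end of the table.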
@@ -151,36 +163,39 @@ out:
static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
{
size_t idx = U32_MAX - id;
- size_t new_size;
struct snapshot_table *new, *old;
- new_size = max(16UL, roundup_pow_of_two(idx + 1));
+ size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
+ size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);
- new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
+ new = kvzalloc(new_bytes, GFP_KERNEL);
if (!new)
return NULL;
+ new->nr = new_size;
+
old = rcu_dereference_protected(c->snapshots, true);
if (old)
- memcpy(new->s,
- rcu_dereference_protected(c->snapshots, true)->s,
- sizeof(new->s[0]) * c->snapshot_table_size);
+ memcpy(new->s, old->s, sizeof(old->s[0]) * old->nr);
rcu_assign_pointer(c->snapshots, new);
- c->snapshot_table_size = new_size;
- kvfree_rcu_mightsleep(old);
+ kvfree_rcu(old, rcu);
- return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+ return &rcu_dereference_protected(c->snapshots,
+ lockdep_is_held(&c->snapshot_table_lock))->s[idx];
}
static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
{
size_t idx = U32_MAX - id;
+ struct snapshot_table *table =
+ rcu_dereference_protected(c->snapshots,
+ lockdep_is_held(&c->snapshot_table_lock));
lockdep_assert_held(&c->snapshot_table_lock);
- if (likely(idx < c->snapshot_table_size))
- return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+ if (likely(table && idx < table->nr))
+ return &table->s[idx];
return __snapshot_t_mut(c, id);
}
@@ -567,6 +582,13 @@ static int check_snapshot_tree(struct btree_trans *trans,
u32 subvol_id;
ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
+ bch_err_fn(c, ret);
+
+ if (bch2_err_matches(ret, ENOENT)) { /* nothing to be done here */
+ ret = 0;
+ goto err;
+ }
+
if (ret)
goto err;
@@ -724,7 +746,6 @@ static int check_snapshot(struct btree_trans *trans,
u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
u32 real_depth;
struct printbuf buf = PRINTBUF;
- bool should_have_subvol;
u32 i, id;
int ret = 0;
@@ -770,7 +791,7 @@ static int check_snapshot(struct btree_trans *trans,
}
}
- should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
+ bool should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
!BCH_SNAPSHOT_DELETED(&s);
if (should_have_subvol) {
@@ -872,6 +893,154 @@ int bch2_check_snapshots(struct bch_fs *c)
return ret;
}
+static int check_snapshot_exists(struct btree_trans *trans, u32 id)
+{
+ struct bch_fs *c = trans->c;
+
+ if (bch2_snapshot_equiv(c, id))
+ return 0;
+
+ u32 tree_id;
+ int ret = bch2_snapshot_tree_create(trans, id, 0, &tree_id);
+ if (ret)
+ return ret;
+
+ struct bkey_i_snapshot *snapshot = bch2_trans_kmalloc(trans, sizeof(*snapshot));
+ ret = PTR_ERR_OR_ZERO(snapshot);
+ if (ret)
+ return ret;
+
+ bkey_snapshot_init(&snapshot->k_i);
+ snapshot->k.p = POS(0, id);
+ snapshot->v.tree = cpu_to_le32(tree_id);
+ snapshot->v.btime.lo = cpu_to_le64(bch2_current_time(c));
+
+ return bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0) ?:
+ bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
+ bkey_s_c_null, bkey_i_to_s(&snapshot->k_i), 0) ?:
+ bch2_snapshot_set_equiv(trans, bkey_i_to_s_c(&snapshot->k_i));
+}
+
+/* Figure out which snapshot nodes belong in the same tree: */
+struct snapshot_tree_reconstruct {
+ enum btree_id btree;
+ struct bpos cur_pos;
+ snapshot_id_list cur_ids;
+ DARRAY(snapshot_id_list) trees;
+};
+
+static void snapshot_tree_reconstruct_exit(struct snapshot_tree_reconstruct *r)
+{
+ darray_for_each(r->trees, i)
+ darray_exit(i);
+ darray_exit(&r->trees);
+ darray_exit(&r->cur_ids);
+}
+
+static inline bool same_snapshot(struct snapshot_tree_reconstruct *r, struct bpos pos)
+{
+ return r->btree == BTREE_ID_inodes
+ ? r->cur_pos.offset == pos.offset
+ : r->cur_pos.inode == pos.inode;
+}
+
+static inline bool snapshot_id_lists_have_common(snapshot_id_list *l, snapshot_id_list *r)
+{
+ darray_for_each(*l, i)
+ if (snapshot_list_has_id(r, *i))
+ return true;
+ return false;
+}
+
+static void snapshot_id_list_to_text(struct printbuf *out, snapshot_id_list *s)
+{
+ bool first = true;
+ darray_for_each(*s, i) {
+ if (!first)
+ prt_char(out, ' ');
+ first = false;
+ prt_printf(out, "%u", *i);
+ }
+}
+
+static int snapshot_tree_reconstruct_next(struct bch_fs *c, struct snapshot_tree_reconstruct *r)
+{
+ if (r->cur_ids.nr) {
+ darray_for_each(r->trees, i)
+ if (snapshot_id_lists_have_common(i, &r->cur_ids)) {
+ int ret = snapshot_list_merge(c, i, &r->cur_ids);
+ if (ret)
+ return ret;
+ goto out;
+ }
+ darray_push(&r->trees, r->cur_ids);
+ darray_init(&r->cur_ids);
+ }
+out:
+ r->cur_ids.nr = 0;
+ return 0;
+}
+
+static int get_snapshot_trees(struct bch_fs *c, struct snapshot_tree_reconstruct *r, struct bpos pos)
+{
+ if (!same_snapshot(r, pos))
+ snapshot_tree_reconstruct_next(c, r);
+ r->cur_pos = pos;
+ return snapshot_list_add_nodup(c, &r->cur_ids, pos.snapshot);
+}
+
+int bch2_reconstruct_snapshots(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct printbuf buf = PRINTBUF;
+ struct snapshot_tree_reconstruct r = {};
+ int ret = 0;
+
+ for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
+ if (btree_type_has_snapshots(btree)) {
+ r.btree = btree;
+
+ ret = for_each_btree_key(trans, iter, btree, POS_MIN,
+ BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_PREFETCH, k, ({
+ get_snapshot_trees(c, &r, k.k->p);
+ }));
+ if (ret)
+ goto err;
+
+ snapshot_tree_reconstruct_next(c, &r);
+ }
+ }
+
+ darray_for_each(r.trees, t) {
+ printbuf_reset(&buf);
+ snapshot_id_list_to_text(&buf, t);
+
+ darray_for_each(*t, id) {
+ if (fsck_err_on(!bch2_snapshot_equiv(c, *id),
+ c, snapshot_node_missing,
+ "snapshot node %u from tree %s missing", *id, buf.buf)) {
+ if (t->nr > 1) {
+ bch_err(c, "cannot reconstruct snapshot trees with multiple nodes");
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ goto err;
+ }
+
+ ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+ check_snapshot_exists(trans, *id));
+ if (ret)
+ goto err;
+ }
+ }
+ }
+fsck_err:
+err:
+ bch2_trans_put(trans);
+ snapshot_tree_reconstruct_exit(&r);
+ printbuf_exit(&buf);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
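The reconstruction above is essentially a union of id lists: each position's set of snapshot ids is merged into the first existing tree it shares an id with, otherwise it starts a new tree. A compact userspace model of that merge step (plain fixed arrays standing in for darrays):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX 16

    struct id_list { unsigned nr; unsigned ids[MAX]; };

    static bool has_id(const struct id_list *l, unsigned id)
    {
        for (unsigned i = 0; i < l->nr; i++)
            if (l->ids[i] == id)
                return true;
        return false;
    }

    static void add_nodup(struct id_list *l, unsigned id)
    {
        if (!has_id(l, id) && l->nr < MAX)
            l->ids[l->nr++] = id;
    }

    /* merge @src into the first tree sharing an id, else start a new tree: */
    static void merge_into_trees(struct id_list *trees, unsigned *nr_trees,
                                 const struct id_list *src)
    {
        for (unsigned t = 0; t < *nr_trees; t++)
            for (unsigned i = 0; i < src->nr; i++)
                if (has_id(&trees[t], src->ids[i])) {
                    for (unsigned j = 0; j < src->nr; j++)
                        add_nodup(&trees[t], src->ids[j]);
                    return;
                }
        trees[(*nr_trees)++] = *src;
    }

    int main(void)
    {
        struct id_list trees[MAX] = { { 0 } };
        unsigned nr = 0;
        struct id_list a = { 2, { 1, 2 } };
        struct id_list b = { 2, { 3, 4 } };
        struct id_list c = { 2, { 2, 3 } };

        merge_into_trees(trees, &nr, &a);
        merge_into_trees(trees, &nr, &b);
        merge_into_trees(trees, &nr, &c);  /* overlaps both; lands in the first */

        printf("%u trees, first has %u ids\n", nr, trees[0].nr);
        return 0;
    }

Like snapshot_tree_reconstruct_next() above, a list overlapping several trees is merged into only the first match in a single pass.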
/*
* Mark a snapshot as deleted, for future cleanup:
*/
@@ -1682,6 +1851,20 @@ int bch2_snapshots_read(struct bch_fs *c)
POS_MIN, 0, k,
(set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
bch_err_fn(c, ret);
+
+ /*
+ * It's important that we check if we need to reconstruct snapshots
+ * before going RW, so we mark that pass as required in the superblock -
+ * otherwise, we could end up deleting keys with missing snapshot nodes
+ * instead of reconstructing them
+ */
+ BUG_ON(!test_bit(BCH_FS_new_fs, &c->flags) &&
+ test_bit(BCH_FS_may_go_rw, &c->flags));
+
+ if (bch2_err_matches(ret, EIO) ||
+ (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_snapshots)))
+ ret = bch2_run_explicit_recovery_pass_persistent(c, BCH_RECOVERY_PASS_reconstruct_snapshots);
+
return ret;
}
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
index 7c66ffc06385..b7d2fed37c4f 100644
--- a/fs/bcachefs/snapshot.h
+++ b/fs/bcachefs/snapshot.h
@@ -33,7 +33,11 @@ int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
{
- return &t->s[U32_MAX - id];
+ u32 idx = U32_MAX - id;
+
+ return likely(t && idx < t->nr)
+ ? &t->s[idx]
+ : NULL;
}
static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
@@ -44,7 +48,8 @@ static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
{
rcu_read_lock();
- id = snapshot_t(c, id)->tree;
+ const struct snapshot_t *s = snapshot_t(c, id);
+ id = s ? s->tree : 0;
rcu_read_unlock();
return id;
@@ -52,7 +57,8 @@ static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
{
- return snapshot_t(c, id)->parent;
+ const struct snapshot_t *s = snapshot_t(c, id);
+ return s ? s->parent : 0;
}
static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
@@ -66,19 +72,19 @@ static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
- u32 parent = snapshot_t(c, id)->parent;
+ const struct snapshot_t *s = snapshot_t(c, id);
+ if (!s)
+ return 0;
- if (parent &&
- snapshot_t(c, id)->depth != snapshot_t(c, parent)->depth + 1)
+ u32 parent = s->parent;
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
+ parent &&
+ s->depth != snapshot_t(c, parent)->depth + 1)
panic("id %u depth=%u parent %u depth=%u\n",
id, snapshot_t(c, id)->depth,
parent, snapshot_t(c, parent)->depth);
return parent;
-#else
- return snapshot_t(c, id)->parent;
-#endif
}
static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
@@ -116,7 +122,8 @@ static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
static inline u32 __bch2_snapshot_equiv(struct bch_fs *c, u32 id)
{
- return snapshot_t(c, id)->equiv;
+ const struct snapshot_t *s = snapshot_t(c, id);
+ return s ? s->equiv : 0;
}
static inline u32 bch2_snapshot_equiv(struct bch_fs *c, u32 id)
@@ -133,38 +140,22 @@ static inline bool bch2_snapshot_is_equiv(struct bch_fs *c, u32 id)
return id == bch2_snapshot_equiv(c, id);
}
-static inline bool bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
+static inline int bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
{
- const struct snapshot_t *s;
- bool ret;
-
rcu_read_lock();
- s = snapshot_t(c, id);
- ret = s->children[0];
+ const struct snapshot_t *s = snapshot_t(c, id);
+ int ret = s ? s->children[0] : -BCH_ERR_invalid_snapshot_node;
rcu_read_unlock();
return ret;
}
-static inline u32 bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
-{
- return !bch2_snapshot_is_internal_node(c, id);
-}
-
-static inline u32 bch2_snapshot_sibling(struct bch_fs *c, u32 id)
+static inline int bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
{
- const struct snapshot_t *s;
- u32 parent = __bch2_snapshot_parent(c, id);
-
- if (!parent)
- return 0;
-
- s = snapshot_t(c, __bch2_snapshot_parent(c, id));
- if (id == s->children[0])
- return s->children[1];
- if (id == s->children[1])
- return s->children[0];
- return 0;
+ int ret = bch2_snapshot_is_internal_node(c, id);
+ if (ret < 0)
+ return ret;
+ return !ret;
}
static inline u32 bch2_snapshot_depth(struct bch_fs *c, u32 parent)
@@ -218,15 +209,34 @@ static inline bool snapshot_list_has_ancestor(struct bch_fs *c, snapshot_id_list
static inline int snapshot_list_add(struct bch_fs *c, snapshot_id_list *s, u32 id)
{
- int ret;
-
BUG_ON(snapshot_list_has_id(s, id));
- ret = darray_push(s, id);
+ int ret = darray_push(s, id);
if (ret)
bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
return ret;
}
+static inline int snapshot_list_add_nodup(struct bch_fs *c, snapshot_id_list *s, u32 id)
+{
+ int ret = snapshot_list_has_id(s, id)
+ ? 0
+ : darray_push(s, id);
+ if (ret)
+ bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
+ return ret;
+}
+
+static inline int snapshot_list_merge(struct bch_fs *c, snapshot_id_list *dst, snapshot_id_list *src)
+{
+ darray_for_each(*src, i) {
+ int ret = snapshot_list_add_nodup(c, dst, *i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
struct bch_snapshot *s);
int bch2_snapshot_get_subvol(struct btree_trans *, u32,
@@ -238,6 +248,7 @@ int bch2_snapshot_node_create(struct btree_trans *, u32,
int bch2_check_snapshot_trees(struct bch_fs *);
int bch2_check_snapshots(struct bch_fs *);
+int bch2_reconstruct_snapshots(struct bch_fs *);
int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
void bch2_delete_dead_snapshots_work(struct work_struct *);
@@ -249,7 +260,7 @@ static inline int bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
struct bpos pos)
{
if (!btree_type_has_snapshots(id) ||
- bch2_snapshot_is_leaf(trans->c, pos.snapshot))
+ bch2_snapshot_is_leaf(trans->c, pos.snapshot) > 0)
return 0;
return __bch2_key_has_snapshot_overwrites(trans, id, pos);
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index ce7aed121942..88a79c823276 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -595,6 +595,78 @@ err:
return ret;
}
+int bch2_initialize_subvolumes(struct bch_fs *c)
+{
+ struct bkey_i_snapshot_tree root_tree;
+ struct bkey_i_snapshot root_snapshot;
+ struct bkey_i_subvolume root_volume;
+ int ret;
+
+ bkey_snapshot_tree_init(&root_tree.k_i);
+ root_tree.k.p.offset = 1;
+ root_tree.v.master_subvol = cpu_to_le32(1);
+ root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);
+
+ bkey_snapshot_init(&root_snapshot.k_i);
+ root_snapshot.k.p.offset = U32_MAX;
+ root_snapshot.v.flags = 0;
+ root_snapshot.v.parent = 0;
+ root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
+ root_snapshot.v.tree = cpu_to_le32(1);
+ SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
+
+ bkey_subvolume_init(&root_volume.k_i);
+ root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
+ root_volume.v.flags = 0;
+ root_volume.v.snapshot = cpu_to_le32(U32_MAX);
+ root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
+
+ ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
+ bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
+ bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_inode_unpacked inode;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+ SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ if (!bkey_is_inode(k.k)) {
+ bch_err(trans->c, "root inode not found");
+ ret = -BCH_ERR_ENOENT_inode;
+ goto err;
+ }
+
+ ret = bch2_inode_unpack(k, &inode);
+ BUG_ON(ret);
+
+ inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
+
+ ret = bch2_inode_write(trans, &iter, &inode);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/* set bi_subvol on root inode */
+int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
+{
+ int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
+ __bch2_fs_upgrade_for_subvolumes(trans));
+ bch_err_fn(c, ret);
+ return ret;
+}
+
int bch2_fs_subvolumes_init(struct bch_fs *c)
{
INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index 903c05162c06..d2015d549bd2 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -37,6 +37,9 @@ void bch2_delete_dead_snapshots_async(struct bch_fs *);
int bch2_subvolume_unlink(struct btree_trans *, u32);
int bch2_subvolume_create(struct btree_trans *, u64, u32, u32, u32 *, u32 *, bool);
+int bch2_initialize_subvolumes(struct bch_fs *);
+int bch2_fs_upgrade_for_subvolumes(struct bch_fs *);
+
int bch2_fs_subvolumes_init(struct bch_fs *);
#endif /* _BCACHEFS_SUBVOLUME_H */
diff --git a/fs/bcachefs/subvolume_types.h b/fs/bcachefs/subvolume_types.h
index ae644adfc391..9b10c8947828 100644
--- a/fs/bcachefs/subvolume_types.h
+++ b/fs/bcachefs/subvolume_types.h
@@ -20,6 +20,8 @@ struct snapshot_t {
};
struct snapshot_table {
+ struct rcu_head rcu;
+ size_t nr;
#ifndef RUST_BINDGEN
DECLARE_FLEX_ARRAY(struct snapshot_t, s);
#else
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index ad28e370b640..08ea3dbbbe97 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -8,7 +8,7 @@
#include "journal.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "replicas.h"
#include "quota.h"
#include "sb-clean.h"
@@ -143,7 +143,7 @@ void bch2_free_super(struct bch_sb_handle *sb)
{
kfree(sb->bio);
if (!IS_ERR_OR_NULL(sb->s_bdev_file))
- fput(sb->s_bdev_file);
+ bdev_fput(sb->s_bdev_file);
kfree(sb->holder);
kfree(sb->sb_name);
@@ -527,9 +527,11 @@ static void bch2_sb_update(struct bch_fs *c)
memset(c->sb.errors_silent, 0, sizeof(c->sb.errors_silent));
struct bch_sb_field_ext *ext = bch2_sb_field_get(src, ext);
- if (ext)
+ if (ext) {
le_bitvector_to_cpu(c->sb.errors_silent, (void *) ext->errors_silent,
sizeof(c->sb.errors_silent) * 8);
+ c->sb.btrees_lost_data = le64_to_cpu(ext->btrees_lost_data);
+ }
for_each_member_device(c, ca) {
struct bch_member m = bch2_sb_member_get(src, ca->dev_idx);
@@ -698,8 +700,11 @@ retry:
return -ENOMEM;
sb->sb_name = kstrdup(path, GFP_KERNEL);
- if (!sb->sb_name)
- return -ENOMEM;
+ if (!sb->sb_name) {
+ ret = -ENOMEM;
+ prt_printf(&err, "error allocating memory for sb_name");
+ goto err;
+ }
#ifndef __KERNEL__
if (opt_get(*opts, direct_io) == false)
@@ -1162,6 +1167,11 @@ static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
kfree(errors_silent);
}
+
+ prt_printf(out, "Btrees with missing data:");
+ prt_tab(out);
+ prt_bitflags(out, __bch2_btree_ids, le64_to_cpu(e->btrees_lost_data));
+ prt_newline(out);
}
static const struct bch_sb_field_ops bch_sb_field_ops_ext = {
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 1ad6e5cd9476..88e214c609bb 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -15,6 +15,7 @@
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
+#include "btree_node_scan.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_write_buffer.h"
@@ -287,8 +288,13 @@ static void __bch2_fs_read_only(struct bch_fs *c)
if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
!test_bit(BCH_FS_emergency_ro, &c->flags))
set_bit(BCH_FS_clean_shutdown, &c->flags);
+
bch2_fs_journal_stop(&c->journal);
+ bch_info(c, "%sclean shutdown complete, journal seq %llu",
+ test_bit(BCH_FS_clean_shutdown, &c->flags) ? "" : "un",
+ c->journal.seq_ondisk);
+
/*
* After stopping journal:
*/
@@ -365,7 +371,7 @@ void bch2_fs_read_only(struct bch_fs *c)
!test_bit(BCH_FS_emergency_ro, &c->flags) &&
test_bit(BCH_FS_started, &c->flags) &&
test_bit(BCH_FS_clean_shutdown, &c->flags) &&
- !c->opts.norecovery) {
+ c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
BUG_ON(atomic_read(&c->btree_cache.dirty));
BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
@@ -510,7 +516,8 @@ err:
int bch2_fs_read_write(struct bch_fs *c)
{
- if (c->opts.norecovery)
+ if (c->opts.recovery_pass_last &&
+ c->opts.recovery_pass_last < BCH_RECOVERY_PASS_journal_replay)
return -BCH_ERR_erofs_norecovery;
if (c->opts.nochanges)
@@ -535,7 +542,9 @@ static void __bch2_fs_free(struct bch_fs *c)
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_exit(&c->times[i]);
+ bch2_find_btree_nodes_exit(&c->found_btree_nodes);
bch2_free_pending_node_rewrites(c);
+ bch2_fs_allocator_background_exit(c);
bch2_fs_sb_errors_exit(c);
bch2_fs_counters_exit(c);
bch2_fs_snapshots_exit(c);
@@ -559,6 +568,7 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_io_clock_exit(&c->io_clock[READ]);
bch2_fs_compress_exit(c);
bch2_journal_keys_put_initial(c);
+ bch2_find_btree_nodes_exit(&c->found_btree_nodes);
BUG_ON(atomic_read(&c->journal_keys.ref));
bch2_fs_btree_write_buffer_exit(c);
percpu_free_rwsem(&c->mark_lock);
@@ -1015,8 +1025,16 @@ int bch2_fs_start(struct bch_fs *c)
for_each_online_member(c, ca)
bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);
+ struct bch_sb_field_ext *ext =
+ bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
mutex_unlock(&c->sb_lock);
+ if (!ext) {
+ bch_err(c, "insufficient space in superblock for sb_field_ext");
+ ret = -BCH_ERR_ENOSPC_sb;
+ goto err;
+ }
+
for_each_rw_member(c, ca)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
diff --git a/fs/bcachefs/super_types.h b/fs/bcachefs/super_types.h
index ec784d975f66..11bcef170c2c 100644
--- a/fs/bcachefs/super_types.h
+++ b/fs/bcachefs/super_types.h
@@ -37,6 +37,8 @@ struct bch_member_cpu {
u8 durability;
u8 freespace_initialized;
u8 valid;
+ u8 btree_bitmap_shift;
+ u64 btree_allocated_bitmap;
};
#endif /* _BCACHEFS_SUPER_TYPES_H */
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index c86a93a8d8fc..5be92fe3f4ea 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -17,7 +17,6 @@
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
-#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
@@ -26,6 +25,7 @@
#include "ec.h"
#include "inode.h"
#include "journal.h"
+#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
@@ -139,6 +139,7 @@ do { \
write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
+write_attribute(trigger_journal_flush);
write_attribute(prune_cache);
write_attribute(btree_wakeup);
rw_attribute(btree_gc_periodic);
@@ -166,7 +167,6 @@ read_attribute(btree_write_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
-read_attribute(btree_updates);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(stripes_heap);
@@ -415,9 +415,6 @@ SHOW(bch2_fs)
if (attr == &sysfs_journal_debug)
bch2_journal_debug_to_text(out, &c->journal);
- if (attr == &sysfs_btree_updates)
- bch2_btree_updates_to_text(out, c);
-
if (attr == &sysfs_btree_cache)
bch2_btree_cache_to_text(out, c);
@@ -505,7 +502,7 @@ STORE(bch2_fs)
/* Debugging: */
- if (!test_bit(BCH_FS_rw, &c->flags))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
return -EROFS;
if (attr == &sysfs_prune_cache) {
@@ -538,6 +535,11 @@ STORE(bch2_fs)
if (attr == &sysfs_trigger_invalidates)
bch2_do_invalidates(c);
+ if (attr == &sysfs_trigger_journal_flush) {
+ bch2_journal_flush_all_pins(&c->journal);
+ bch2_journal_meta(&c->journal);
+ }
+
#ifdef CONFIG_BCACHEFS_TESTS
if (attr == &sysfs_perf_test) {
char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
@@ -558,6 +560,7 @@ STORE(bch2_fs)
size = ret;
}
#endif
+ bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
return size;
}
SYSFS_OPS(bch2_fs);
@@ -639,7 +642,6 @@ SYSFS_OPS(bch2_fs_internal);
struct attribute *bch2_fs_internal_files[] = {
&sysfs_flags,
&sysfs_journal_debug,
- &sysfs_btree_updates,
&sysfs_btree_cache,
&sysfs_btree_key_cache,
&sysfs_new_stripes,
@@ -657,6 +659,7 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_trigger_gc,
&sysfs_trigger_discards,
&sysfs_trigger_invalidates,
+ &sysfs_trigger_journal_flush,
&sysfs_prune_cache,
&sysfs_btree_wakeup,
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index b3fe9fc57747..bfec656f94c0 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -672,7 +672,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX));
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c
index 940db15d6a93..b1af7ac430f6 100644
--- a/fs/bcachefs/thread_with_file.c
+++ b/fs/bcachefs/thread_with_file.c
@@ -294,16 +294,27 @@ static int thread_with_stdio_fn(void *arg)
return 0;
}
-int bch2_run_thread_with_stdio(struct thread_with_stdio *thr,
- const struct thread_with_stdio_ops *ops)
+void bch2_thread_with_stdio_init(struct thread_with_stdio *thr,
+ const struct thread_with_stdio_ops *ops)
{
stdio_buf_init(&thr->stdio.input);
stdio_buf_init(&thr->stdio.output);
thr->ops = ops;
+}
+int __bch2_run_thread_with_stdio(struct thread_with_stdio *thr)
+{
return bch2_run_thread_with_file(&thr->thr, &thread_with_stdio_fops, thread_with_stdio_fn);
}
+int bch2_run_thread_with_stdio(struct thread_with_stdio *thr,
+ const struct thread_with_stdio_ops *ops)
+{
+ bch2_thread_with_stdio_init(thr, ops);
+
+ return __bch2_run_thread_with_stdio(thr);
+}
+
int bch2_run_thread_with_stdout(struct thread_with_stdio *thr,
const struct thread_with_stdio_ops *ops)
{
diff --git a/fs/bcachefs/thread_with_file.h b/fs/bcachefs/thread_with_file.h
index af54ea8f5b0f..1d63d14d7dca 100644
--- a/fs/bcachefs/thread_with_file.h
+++ b/fs/bcachefs/thread_with_file.h
@@ -63,6 +63,9 @@ struct thread_with_stdio {
const struct thread_with_stdio_ops *ops;
};
+void bch2_thread_with_stdio_init(struct thread_with_stdio *,
+ const struct thread_with_stdio_ops *);
+int __bch2_run_thread_with_stdio(struct thread_with_stdio *);
int bch2_run_thread_with_stdio(struct thread_with_stdio *,
const struct thread_with_stdio_ops *);
int bch2_run_thread_with_stdout(struct thread_with_stdio *,
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index 216fadf16928..92c6ad75e702 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -707,149 +707,6 @@ void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
}
}
-static int alignment_ok(const void *base, size_t align)
-{
- return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
- ((unsigned long)base & (align - 1)) == 0;
-}
-
-static void u32_swap(void *a, void *b, size_t size)
-{
- u32 t = *(u32 *)a;
- *(u32 *)a = *(u32 *)b;
- *(u32 *)b = t;
-}
-
-static void u64_swap(void *a, void *b, size_t size)
-{
- u64 t = *(u64 *)a;
- *(u64 *)a = *(u64 *)b;
- *(u64 *)b = t;
-}
-
-static void generic_swap(void *a, void *b, size_t size)
-{
- char t;
-
- do {
- t = *(char *)a;
- *(char *)a++ = *(char *)b;
- *(char *)b++ = t;
- } while (--size > 0);
-}
-
-static inline int do_cmp(void *base, size_t n, size_t size,
- int (*cmp_func)(const void *, const void *, size_t),
- size_t l, size_t r)
-{
- return cmp_func(base + inorder_to_eytzinger0(l, n) * size,
- base + inorder_to_eytzinger0(r, n) * size,
- size);
-}
-
-static inline void do_swap(void *base, size_t n, size_t size,
- void (*swap_func)(void *, void *, size_t),
- size_t l, size_t r)
-{
- swap_func(base + inorder_to_eytzinger0(l, n) * size,
- base + inorder_to_eytzinger0(r, n) * size,
- size);
-}
-
-void eytzinger0_sort(void *base, size_t n, size_t size,
- int (*cmp_func)(const void *, const void *, size_t),
- void (*swap_func)(void *, void *, size_t))
-{
- int i, c, r;
-
- if (!swap_func) {
- if (size == 4 && alignment_ok(base, 4))
- swap_func = u32_swap;
- else if (size == 8 && alignment_ok(base, 8))
- swap_func = u64_swap;
- else
- swap_func = generic_swap;
- }
-
- /* heapify */
- for (i = n / 2 - 1; i >= 0; --i) {
- for (r = i; r * 2 + 1 < n; r = c) {
- c = r * 2 + 1;
-
- if (c + 1 < n &&
- do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
- c++;
-
- if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
- break;
-
- do_swap(base, n, size, swap_func, r, c);
- }
- }
-
- /* sort */
- for (i = n - 1; i > 0; --i) {
- do_swap(base, n, size, swap_func, 0, i);
-
- for (r = 0; r * 2 + 1 < i; r = c) {
- c = r * 2 + 1;
-
- if (c + 1 < i &&
- do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
- c++;
-
- if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
- break;
-
- do_swap(base, n, size, swap_func, r, c);
- }
- }
-}
-
-void sort_cmp_size(void *base, size_t num, size_t size,
- int (*cmp_func)(const void *, const void *, size_t),
- void (*swap_func)(void *, void *, size_t size))
-{
- /* pre-scale counters for performance */
- int i = (num/2 - 1) * size, n = num * size, c, r;
-
- if (!swap_func) {
- if (size == 4 && alignment_ok(base, 4))
- swap_func = u32_swap;
- else if (size == 8 && alignment_ok(base, 8))
- swap_func = u64_swap;
- else
- swap_func = generic_swap;
- }
-
- /* heapify */
- for ( ; i >= 0; i -= size) {
- for (r = i; r * 2 + size < n; r = c) {
- c = r * 2 + size;
- if (c < n - size &&
- cmp_func(base + c, base + c + size, size) < 0)
- c += size;
- if (cmp_func(base + r, base + c, size) >= 0)
- break;
- swap_func(base + r, base + c, size);
- }
- }
-
- /* sort */
- for (i = n - size; i > 0; i -= size) {
- swap_func(base, base + i, size);
- for (r = 0; r * 2 + size < i; r = c) {
- c = r * 2 + size;
- if (c < i - size &&
- cmp_func(base + c, base + c + size, size) < 0)
- c += size;
- if (cmp_func(base + r, base + c, size) >= 0)
- break;
- swap_func(base + r, base + c, size);
- }
- }
-}
-
#if 0
void eytzinger1_test(void)
{
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 175aee3074c7..5cf885b09986 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -631,10 +631,6 @@ static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
memset(s + bytes, c, rem);
}
-void sort_cmp_size(void *base, size_t num, size_t size,
- int (*cmp_func)(const void *, const void *, size_t),
- void (*swap_func)(void *, void *, size_t));
-
/* just the memmove, doesn't update @_nr */
#define __array_insert_item(_array, _nr, _pos) \
memmove(&(_array)[(_pos) + 1], \
@@ -792,9 +788,27 @@ static inline int copy_from_user_errcode(void *to, const void __user *from, unsi
#endif
+static inline void mod_bit(long nr, volatile unsigned long *addr, bool v)
+{
+ if (v)
+ set_bit(nr, addr);
+ else
+ clear_bit(nr, addr);
+}
+
static inline void __set_bit_le64(size_t bit, __le64 *addr)
{
addr[bit / 64] |= cpu_to_le64(BIT_ULL(bit % 64));
}
+static inline void __clear_bit_le64(size_t bit, __le64 *addr)
+{
+ addr[bit / 64] &= ~cpu_to_le64(BIT_ULL(bit % 64));
+}
+
+static inline bool test_bit_le64(size_t bit, __le64 *addr)
+{
+ return (addr[bit / 64] & cpu_to_le64(BIT_ULL(bit % 64))) != 0;
+}
+
#endif /* _BCACHEFS_UTIL_H */
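The __le64 bit helpers above manipulate little-endian on-disk bitmaps correctly regardless of host byte order, because the mask is byte-swapped rather than the bitmap word. A quick userspace check of the same round-trip (htole64() from glibc's <endian.h> standing in for cpu_to_le64()):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    static void set_bit_le64(size_t bit, uint64_t *addr)
    {
        addr[bit / 64] |= htole64(1ULL << (bit % 64));
    }

    static void clear_bit_le64(size_t bit, uint64_t *addr)
    {
        addr[bit / 64] &= ~htole64(1ULL << (bit % 64));
    }

    static int test_bit_le64(size_t bit, const uint64_t *addr)
    {
        return (addr[bit / 64] & htole64(1ULL << (bit % 64))) != 0;
    }

    int main(void)
    {
        uint64_t map[2] = { 0, 0 };

        set_bit_le64(3, map);
        set_bit_le64(70, map);   /* word 1, bit 6 */
        clear_bit_le64(3, map);

        printf("%d %d\n", test_bit_le64(3, map), test_bit_le64(70, map));  /* 0 1 */
        return 0;
    }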
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 1920ed69279b..3314249e8674 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1359,7 +1359,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
rcu_read_unlock();
- strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
+ get_task_comm(psinfo->pr_fname, p);
return 0;
}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index c1e6a5bbeeaf..58110c968667 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2776,20 +2776,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
size_t alloc_bytes;
alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
- data = kvmalloc(alloc_bytes, GFP_KERNEL);
+ data = kvzalloc(alloc_bytes, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
- if (total_bytes >= sizeof(*data)) {
+ if (total_bytes >= sizeof(*data))
data->bytes_left = total_bytes - sizeof(*data);
- data->bytes_missing = 0;
- } else {
+ else
data->bytes_missing = sizeof(*data) - total_bytes;
- data->bytes_left = 0;
- }
-
- data->elem_cnt = 0;
- data->elem_missed = 0;
return data;
}
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 5f7587ca1ca7..1e09aeea69c2 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1559,7 +1559,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* needing to allocate extents from the block group.
*/
used = btrfs_space_info_used(space_info, true);
- if (space_info->total_bytes - block_group->length < used) {
+ if (space_info->total_bytes - block_group->length < used &&
+ block_group->zone_unusable < block_group->length) {
/*
* Add a reference for the list, compensate for the ref
* drop under the "next" label for the
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index dd6f566a383f..121ab890bd05 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1133,6 +1133,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
if (ret)
return ret;
+ ret = btrfs_record_root_in_trans(trans, node->root);
+ if (ret)
+ return ret;
ret = btrfs_update_delayed_inode(trans, node->root, path, node);
return ret;
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index beedd6ed64d3..257d044bca91 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3464,6 +3464,14 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
if (root_id != BTRFS_TREE_LOG_OBJECTID) {
struct btrfs_ref generic_ref = { 0 };
+ /*
+ * Assert that the extent buffer is not cleared due to
+ * EXTENT_BUFFER_ZONED_ZEROOUT. Please refer to
+ * btrfs_clear_buffer_dirty() and btree_csum_one_bio() for
+ * details.
+ */
+ ASSERT(btrfs_header_bytenr(buf) != 0);
+
btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
buf->start, buf->len, parent,
btrfs_header_owner(buf));
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7441245b1ceb..2776112dbdf8 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -681,31 +681,21 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
gfp_t extra_gfp)
{
+ const gfp_t gfp = GFP_NOFS | extra_gfp;
unsigned int allocated;
for (allocated = 0; allocated < nr_pages;) {
unsigned int last = allocated;
- allocated = alloc_pages_bulk_array(GFP_NOFS | extra_gfp,
- nr_pages, page_array);
-
- if (allocated == nr_pages)
- return 0;
-
- /*
- * During this iteration, no page could be allocated, even
- * though alloc_pages_bulk_array() falls back to alloc_page()
- * if it could not bulk-allocate. So we must be out of memory.
- */
- if (allocated == last) {
+ allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
+ if (unlikely(allocated == last)) {
+ /* No progress, fail and do cleanup. */
for (int i = 0; i < allocated; i++) {
__free_page(page_array[i]);
page_array[i] = NULL;
}
return -ENOMEM;
}
-
- memalloc_retry_wait(GFP_NOFS);
}
return 0;
}
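The retry condition above keys off progress rather than a retry count: since alloc_pages_bulk_array() already falls back to single-page allocation internally, a round that allocates nothing means the system is genuinely out of memory. The same progress-based pattern in generic userspace form (bulk_alloc() is an illustrative stand-in):

    #include <stdlib.h>

    /* stand-in: try to extend @arr toward @want items, return the new count */
    static unsigned bulk_alloc(void **arr, unsigned have, unsigned want)
    {
        while (have < want) {
            void *p = malloc(64);
            if (!p)
                break;
            arr[have++] = p;
        }
        return have;
    }

    static int alloc_array(unsigned nr, void **arr)
    {
        for (unsigned allocated = 0; allocated < nr;) {
            unsigned last = allocated;

            allocated = bulk_alloc(arr, allocated, nr);
            if (allocated == last) {  /* no progress: clean up, fail */
                for (unsigned i = 0; i < allocated; i++) {
                    free(arr[i]);
                    arr[i] = NULL;
                }
                return -1;  /* -ENOMEM in the kernel version */
            }
        }
        return 0;
    }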
@@ -4154,7 +4144,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
* The actual zeroout of the buffer will happen later in
* btree_csum_one_bio.
*/
- if (btrfs_is_zoned(fs_info)) {
+ if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
return;
}
@@ -4193,6 +4183,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
num_folios = num_extent_folios(eb);
WARN_ON(atomic_read(&eb->refs) == 0);
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
+ WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
if (!was_dirty) {
bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
@@ -4333,6 +4324,19 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
goto done;
+ /*
+ * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
+ * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
+ * started and finished reading the same eb. In this case, UPTODATE
+ * will now be set, and we shouldn't read it in again.
+ */
+ if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
+ clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
+ smp_mb__after_atomic();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
+ return 0;
+ }
+
clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
check_buffer_tree_ref(eb);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 347ca13d15a9..24a048210b15 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -309,7 +309,7 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
btrfs_warn(fs_info,
"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
btrfs_ino(inode), btrfs_root_id(inode->root),
- start, len, gen);
+ start, start + len, gen);
ret = -ENOENT;
goto out;
}
@@ -318,7 +318,7 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
btrfs_warn(fs_info,
"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
btrfs_ino(inode), btrfs_root_id(inode->root),
- em->start, start, len, gen);
+ em->start, start, start + len, gen);
ret = -EUCLEAN;
goto out;
}
@@ -340,9 +340,9 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
em->mod_len = em->len;
}
- free_extent_map(em);
out:
write_unlock(&tree->lock);
+ free_extent_map(em);
return ret;
}
@@ -629,13 +629,13 @@ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
*/
ret = merge_extent_mapping(em_tree, existing,
em, start);
- if (ret) {
+ if (WARN_ON(ret)) {
free_extent_map(em);
*em_in = NULL;
- WARN_ONCE(ret,
-"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu\n",
- existing->start, existing->len,
- orig_start, orig_len, start);
+ btrfs_warn(fs_info,
+"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
+ existing->start, extent_map_end(existing),
+ orig_start, orig_start + orig_len, start);
}
free_extent_map(existing);
}
@@ -817,7 +817,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
split->block_len = em->block_len;
split->orig_start = em->orig_start;
} else {
- const u64 diff = start + len - em->start;
+ const u64 diff = end - em->start;
split->block_len = split->len;
split->block_start += diff;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 37701531eeb1..7fed887e700c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1145,13 +1145,13 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
0, *alloc_hint, &ins, 1, 1);
if (ret) {
/*
- * Here we used to try again by going back to non-compressed
- * path for ENOSPC. But we can't reserve space even for
- * compressed size, how could it work for uncompressed size
- * which requires larger size? So here we directly go error
- * path.
+ * We can't reserve contiguous space for the compressed size.
+ * Unlikely, but it's possible that we could have enough
+ * non-contiguous space for the uncompressed size instead. So
+ * fall back to uncompressed.
*/
- goto out_free;
+ submit_uncompressed_range(inode, async_extent, locked_page);
+ goto done;
}
/* Here we're doing allocation and writeback of the compressed pages */
@@ -1203,7 +1203,6 @@ done:
out_free_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
-out_free:
mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
extent_clear_unlock_delalloc(inode, start, end,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
@@ -2533,7 +2532,7 @@ void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
*/
if (bits & EXTENT_CLEAR_META_RESV &&
root != fs_info->tree_root)
- btrfs_delalloc_release_metadata(inode, len, false);
+ btrfs_delalloc_release_metadata(inode, len, true);
/* For sanity tests. */
if (btrfs_is_testing(fs_info))
@@ -4503,6 +4502,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
struct btrfs_trans_handle *trans;
struct btrfs_block_rsv block_rsv;
u64 root_flags;
+ u64 qgroup_reserved = 0;
int ret;
down_write(&fs_info->subvol_sem);
@@ -4547,12 +4547,20 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
if (ret)
goto out_undead;
+ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_release;
}
+ ret = btrfs_record_root_in_trans(trans, root);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_end_trans;
+ }
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ qgroup_reserved = 0;
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
@@ -4611,7 +4619,9 @@ out_end_trans:
ret = btrfs_end_transaction(trans);
inode->i_flags |= S_DEAD;
out_release:
- btrfs_subvolume_release_metadata(root, &block_rsv);
+ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
+ if (qgroup_reserved)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
out_undead:
if (ret) {
spin_lock(&dest->root_item_lock);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 294e31edec9d..55f3ba6a831c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -613,6 +613,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
int ret;
dev_t anon_dev;
u64 objectid;
+ u64 qgroup_reserved = 0;
root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
if (!root_item)
@@ -650,13 +651,18 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
trans_num_items, false);
if (ret)
goto out_new_inode_args;
+ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
- btrfs_subvolume_release_metadata(root, &block_rsv);
- goto out_new_inode_args;
+ goto out_release_rsv;
}
+ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
+ if (ret)
+ goto out;
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ qgroup_reserved = 0;
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
/* Tree log can't currently deal with an inode which is a new root. */
@@ -767,9 +773,11 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
out:
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
- btrfs_subvolume_release_metadata(root, &block_rsv);
-
btrfs_end_transaction(trans);
+out_release_rsv:
+ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
+ if (qgroup_reserved)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
out_new_inode_args:
btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
@@ -791,6 +799,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
struct btrfs_pending_snapshot *pending_snapshot;
unsigned int trans_num_items;
struct btrfs_trans_handle *trans;
+ struct btrfs_block_rsv *block_rsv;
+ u64 qgroup_reserved = 0;
int ret;
/* We do not support snapshotting right now. */
@@ -827,19 +837,19 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
goto free_pending;
}
- btrfs_init_block_rsv(&pending_snapshot->block_rsv,
- BTRFS_BLOCK_RSV_TEMP);
+ block_rsv = &pending_snapshot->block_rsv;
+ btrfs_init_block_rsv(block_rsv, BTRFS_BLOCK_RSV_TEMP);
/*
* 1 to add dir item
* 1 to add dir index
* 1 to update parent inode item
*/
trans_num_items = create_subvol_num_items(inherit) + 3;
- ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
- &pending_snapshot->block_rsv,
+ ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, block_rsv,
trans_num_items, false);
if (ret)
goto free_pending;
+ qgroup_reserved = block_rsv->qgroup_rsv_reserved;
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
@@ -852,6 +862,13 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
ret = PTR_ERR(trans);
goto fail;
}
+ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
+ if (ret) {
+ btrfs_end_transaction(trans);
+ goto fail;
+ }
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ qgroup_reserved = 0;
trans->pending_snapshot = pending_snapshot;
@@ -881,7 +898,9 @@ fail:
if (ret && pending_snapshot->snap)
pending_snapshot->snap->anon_dev = 0;
btrfs_put_root(pending_snapshot->snap);
- btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
+ btrfs_block_rsv_release(fs_info, block_rsv, (u64)-1, NULL);
+ if (qgroup_reserved)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
free_pending:
if (pending_snapshot->anon_dev)
free_anon_bdev(pending_snapshot->anon_dev);
diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c
index c96dd66fd0f7..210d9c82e2ae 100644
--- a/fs/btrfs/messages.c
+++ b/fs/btrfs/messages.c
@@ -7,7 +7,7 @@
#ifdef CONFIG_PRINTK
-#define STATE_STRING_PREFACE ": state "
+#define STATE_STRING_PREFACE " state "
#define STATE_STRING_BUF_LEN (sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT + 1)
/*
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5f90f0605b12..cf8820ce7aa2 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -4495,6 +4495,8 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
BTRFS_QGROUP_RSV_META_PREALLOC);
trace_qgroup_meta_convert(root, num_bytes);
qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
+ if (!sb_rdonly(fs_info->sb))
+ add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
}
/*
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 4bb538a372ce..7007f9e0c972 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -548,13 +548,3 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
}
return ret;
}
-
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- u64 qgroup_to_release;
-
- btrfs_block_rsv_release(fs_info, rsv, (u64)-1, &qgroup_to_release);
- btrfs_qgroup_convert_reserved_meta(root, qgroup_to_release);
-}
diff --git a/fs/btrfs/root-tree.h b/fs/btrfs/root-tree.h
index 6f929cf3bd49..8f5739e732b9 100644
--- a/fs/btrfs/root-tree.h
+++ b/fs/btrfs/root-tree.h
@@ -18,8 +18,6 @@ struct btrfs_trans_handle;
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
int nitems, bool use_global_rsv);
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv);
int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
u64 ref_id, u64 dirid, u64 sequence,
const struct fscrypt_str *name);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c4bd0e60db59..4b22cfe9a98c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1012,6 +1012,7 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
struct btrfs_fs_info *fs_info = sctx->fs_info;
int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
stripe->bg->length);
+ unsigned long repaired;
int mirror;
int i;
@@ -1078,16 +1079,15 @@ out:
* Submit the repaired sectors. For zoned case, we cannot do repair
* in-place, but queue the bg to be relocated.
*/
- if (btrfs_is_zoned(fs_info)) {
- if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
+ bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap,
+ stripe->nr_sectors);
+ if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
+ if (btrfs_is_zoned(fs_info)) {
btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
- } else if (!sctx->readonly) {
- unsigned long repaired;
-
- bitmap_andnot(&repaired, &stripe->init_error_bitmap,
- &stripe->error_bitmap, stripe->nr_sectors);
- scrub_write_sectors(sctx, stripe, repaired, false);
- wait_scrub_stripe_io(stripe);
+ } else {
+ scrub_write_sectors(sctx, stripe, repaired, false);
+ wait_scrub_stripe_io(stripe);
+ }
}
scrub_stripe_report_errors(sctx, stripe);
@@ -2812,7 +2812,17 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
gen = btrfs_get_last_trans_committed(fs_info);
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- bytenr = btrfs_sb_offset(i);
+ ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
+ if (ret == -ENOENT)
+ break;
+
+ if (ret) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.super_errors++;
+ spin_unlock(&sctx->stat_lock);
+ continue;
+ }
+
if (bytenr + BTRFS_SUPER_INFO_SIZE >
scrub_dev->commit_total_bytes)
break;
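
The scrub hunk above derives the set of repaired sectors as init_error_bitmap & ~error_bitmap before deciding whether anything needs to be written back. A minimal userspace sketch of that step, using fixed-width masks in place of the kernel's bitmap_andnot()/bitmap_empty() (names and values here are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Stand-ins for stripe->init_error_bitmap and stripe->error_bitmap;
         * in the kernel these are driven by bitmap_andnot(). */
        uint64_t init_error  = 0x3c; /* sectors 2-5 were bad initially */
        uint64_t still_error = 0x04; /* sector 2 is still bad */

        /* repaired = init_error & ~still_error: bad before, good now */
        uint64_t repaired = init_error & ~still_error;

        printf("repaired bitmap: 0x%llx\n", (unsigned long long)repaired);

        /* Only submit a write-back when at least one sector was repaired,
         * mirroring the !bitmap_empty() check in the hunk above. */
        if (repaired)
            printf("would call scrub_write_sectors() for these sectors\n");
        return 0;
    }
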
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index 253cce7ffecf..47b5d301038e 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -847,6 +847,11 @@ static int test_case_7(struct btrfs_fs_info *fs_info)
goto out;
}
+ if (em->block_start != SZ_32K + SZ_4K) {
+ test_err("em->block_start is %llu, expected 36K", em->block_start);
+ goto out;
+ }
+
free_extent_map(em);
read_lock(&em_tree->lock);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 46e8426adf4f..85f359e0e0a7 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -745,14 +745,6 @@ again:
h->reloc_reserved = reloc_reserved;
}
- /*
- * Now that we have found a transaction to be a part of, convert the
- * qgroup reservation from prealloc to pertrans. A different transaction
- * can't race in and free our pertrans out from under us.
- */
- if (qgroup_reserved)
- btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
-
got_it:
if (!current->journal_info)
current->journal_info = h;
@@ -786,8 +778,15 @@ got_it:
* not just freed.
*/
btrfs_end_transaction(h);
- return ERR_PTR(ret);
+ goto reserve_fail;
}
+ /*
+ * Now that we have found a transaction to be a part of, convert the
+ * qgroup reservation from prealloc to pertrans. A different transaction
+ * can't race in and free our pertrans out from under us.
+ */
+ if (qgroup_reserved)
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
return h;
@@ -1495,6 +1494,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
radix_tree_tag_clear(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
+ btrfs_qgroup_free_meta_all_pertrans(root);
spin_unlock(&fs_info->fs_roots_radix_lock);
btrfs_free_log(trans, root);
@@ -1519,7 +1519,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
if (ret2)
return ret2;
spin_lock(&fs_info->fs_roots_radix_lock);
- btrfs_qgroup_free_meta_all_pertrans(root);
}
}
spin_unlock(&fs_info->fs_roots_radix_lock);
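
The transaction.c change above moves the prealloc-to-pertrans qgroup conversion to after the point where the transaction handle is known to be valid, so a failed join can still free the prealloc reservation cleanly. A toy model of that ordering, with invented counter names, assuming a simple reserve/convert/free discipline:

    #include <stdio.h>

    /* Toy model: convert a prealloc reservation to pertrans only after
     * the transaction handle is known to be valid. Not the btrfs API. */
    static long prealloc_rsv, pertrans_rsv;

    static int join_transaction(int should_fail)
    {
        return should_fail ? -1 : 0;
    }

    static int start_transaction(long reserved, int should_fail)
    {
        prealloc_rsv += reserved;
        if (join_transaction(should_fail)) {
            prealloc_rsv -= reserved;   /* error path frees prealloc only */
            return -1;
        }
        /* Success: now it is safe to move the bytes to pertrans. */
        prealloc_rsv -= reserved;
        pertrans_rsv += reserved;
        return 0;
    }

    int main(void)
    {
        start_transaction(4096, 1); /* failed join leaves both counters at 0 */
        printf("after failure: prealloc=%ld pertrans=%ld\n", prealloc_rsv, pertrans_rsv);
        start_transaction(4096, 0);
        printf("after success: prealloc=%ld pertrans=%ld\n", prealloc_rsv, pertrans_rsv);
        return 0;
    }
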
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1dc1f1946ae0..f15591f3e54f 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -692,6 +692,16 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
device->bdev = file_bdev(bdev_file);
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ if (device->devt != device->bdev->bd_dev) {
+ btrfs_warn(NULL,
+ "device %s maj:min changed from %d:%d to %d:%d",
+ device->name->str, MAJOR(device->devt),
+ MINOR(device->devt), MAJOR(device->bdev->bd_dev),
+ MINOR(device->bdev->bd_dev));
+
+ device->devt = device->bdev->bd_dev;
+ }
+
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
@@ -1174,23 +1184,30 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *device;
struct btrfs_device *latest_dev = NULL;
struct btrfs_device *tmp_device;
+ int ret = 0;
list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
dev_list) {
- int ret;
+ int ret2;
- ret = btrfs_open_one_device(fs_devices, device, flags, holder);
- if (ret == 0 &&
+ ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
+ if (ret2 == 0 &&
(!latest_dev || device->generation > latest_dev->generation)) {
latest_dev = device;
- } else if (ret == -ENODATA) {
+ } else if (ret2 == -ENODATA) {
fs_devices->num_devices--;
list_del(&device->dev_list);
btrfs_free_device(device);
}
+ if (ret == 0 && ret2 != 0)
+ ret = ret2;
}
- if (fs_devices->open_devices == 0)
+
+ if (fs_devices->open_devices == 0) {
+ if (ret)
+ return ret;
return -EINVAL;
+ }
fs_devices->opened = 1;
fs_devices->latest_dev = latest_dev;
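
open_fs_devices() now records the first per-device error and keeps iterating, surfacing that error (rather than a bare -EINVAL) when no device could be opened at all. A small sketch of the pattern, with a made-up open_one() that always fails:

    #include <stdio.h>
    #include <errno.h>

    static int open_one(int i)
    {
        return (i == 0) ? -EACCES : -ENODATA; /* pretend every open fails */
    }

    int main(void)
    {
        int ret = 0, opened = 0;

        for (int i = 0; i < 3; i++) {
            int ret2 = open_one(i);

            if (ret2 == 0)
                opened++;
            else if (ret == 0)
                ret = ret2; /* remember the first real error */
        }
        if (opened == 0)
            printf("fail with %d instead of a bare -EINVAL\n",
                   ret ? ret : -EINVAL);
        return 0;
    }
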
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 5a3d5ec75c5a..4cba80b34387 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1574,11 +1574,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
if (!map)
return -EINVAL;
- cache->physical_map = btrfs_clone_chunk_map(map, GFP_NOFS);
- if (!cache->physical_map) {
- ret = -ENOMEM;
- goto out;
- }
+ cache->physical_map = map;
zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
if (!zone_info) {
@@ -1690,7 +1686,6 @@ out:
}
bitmap_free(active);
kfree(zone_info);
- btrfs_free_chunk_map(map);
return ret;
}
@@ -2175,6 +2170,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
struct btrfs_chunk_map *map;
const bool is_metadata = (block_group->flags &
(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int ret = 0;
int i;
@@ -2250,6 +2246,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
btrfs_clear_data_reloc_bg(block_group);
spin_unlock(&block_group->lock);
+ down_read(&dev_replace->rwsem);
map = block_group->physical_map;
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_device *device = map->stripes[i].dev;
@@ -2266,13 +2263,16 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
zinfo->zone_size >> SECTOR_SHIFT);
memalloc_nofs_restore(nofs_flags);
- if (ret)
+ if (ret) {
+ up_read(&dev_replace->rwsem);
return ret;
+ }
if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
zinfo->reserved_active_zones++;
btrfs_dev_clear_active_zone(device, physical);
}
+ up_read(&dev_replace->rwsem);
if (!fully_written)
btrfs_dec_block_group_ro(block_group);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 1340d77124ae..ee9caf7916fb 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -795,8 +795,10 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
ihold(inode);
if (wbc->sync_mode == WB_SYNC_NONE &&
- ceph_inode_to_fs_client(inode)->write_congested)
+ ceph_inode_to_fs_client(inode)->write_congested) {
+ redirty_page_for_writepage(wbc, page);
return AOP_WRITEPAGE_ACTIVATE;
+ }
wait_on_page_fscache(page);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 55051ad09c19..c4941ba245ac 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -4783,13 +4783,13 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
ceph_vinop(inode));
- spin_lock(&mdsc->cap_unlink_delay_lock);
+ spin_lock(&mdsc->cap_delay_lock);
ci->i_ceph_flags |= CEPH_I_FLUSH;
if (!list_empty(&ci->i_cap_delay_list))
list_del_init(&ci->i_cap_delay_list);
list_add_tail(&ci->i_cap_delay_list,
&mdsc->cap_unlink_delay_list);
- spin_unlock(&mdsc->cap_unlink_delay_lock);
+ spin_unlock(&mdsc->cap_delay_lock);
/*
* Fire the work immediately, because the MDS maybe
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 3ab9c268a8bb..360b686c3c67 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2504,7 +2504,7 @@ static void ceph_cap_unlink_work(struct work_struct *work)
struct ceph_client *cl = mdsc->fsc->client;
doutc(cl, "begin\n");
- spin_lock(&mdsc->cap_unlink_delay_lock);
+ spin_lock(&mdsc->cap_delay_lock);
while (!list_empty(&mdsc->cap_unlink_delay_list)) {
struct ceph_inode_info *ci;
struct inode *inode;
@@ -2516,15 +2516,15 @@ static void ceph_cap_unlink_work(struct work_struct *work)
inode = igrab(&ci->netfs.inode);
if (inode) {
- spin_unlock(&mdsc->cap_unlink_delay_lock);
+ spin_unlock(&mdsc->cap_delay_lock);
doutc(cl, "on %p %llx.%llx\n", inode,
ceph_vinop(inode));
ceph_check_caps(ci, CHECK_CAPS_FLUSH);
iput(inode);
- spin_lock(&mdsc->cap_unlink_delay_lock);
+ spin_lock(&mdsc->cap_delay_lock);
}
}
- spin_unlock(&mdsc->cap_unlink_delay_lock);
+ spin_unlock(&mdsc->cap_delay_lock);
doutc(cl, "done\n");
}
@@ -5404,7 +5404,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_LIST_HEAD(&mdsc->cap_wait_list);
spin_lock_init(&mdsc->cap_delay_lock);
INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
- spin_lock_init(&mdsc->cap_unlink_delay_lock);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 03f8ff00874f..b88e80415224 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -461,9 +461,8 @@ struct ceph_mds_client {
struct delayed_work delayed_work; /* delayed work */
unsigned long last_renew_caps; /* last time we renewed our caps */
struct list_head cap_delay_list; /* caps with delayed release */
- spinlock_t cap_delay_lock; /* protects cap_delay_list */
struct list_head cap_unlink_delay_list; /* caps with delayed release for unlink */
- spinlock_t cap_unlink_delay_lock; /* protects cap_unlink_delay_list */
+ spinlock_t cap_delay_lock; /* protects cap_delay_list and cap_unlink_delay_list */
struct list_head snap_flush_list; /* cap_snaps ready to flush */
spinlock_t snap_flush_lock;
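
The ceph change retires cap_unlink_delay_lock and lets a single cap_delay_lock guard both delay lists, so code paths that touch both lists never need to nest two locks. A sketch of the consolidated scheme with stand-in types (not the ceph structures):

    #include <stdio.h>
    #include <pthread.h>

    struct delay_entry { struct delay_entry *next; };

    struct mds_client_model {
        pthread_mutex_t cap_delay_lock;   /* protects both lists below */
        struct delay_entry *cap_delay_list;
        struct delay_entry *cap_unlink_delay_list;
    };

    static void queue_for_unlink(struct mds_client_model *m, struct delay_entry *e)
    {
        pthread_mutex_lock(&m->cap_delay_lock);
        e->next = m->cap_unlink_delay_list;   /* push under the shared lock */
        m->cap_unlink_delay_list = e;
        pthread_mutex_unlock(&m->cap_delay_lock);
    }

    int main(void)
    {
        struct mds_client_model m = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
        struct delay_entry e = { NULL };

        queue_for_unlink(&m, &e);
        printf("queued: %d\n", m.cap_unlink_delay_list == &e);
        return 0;
    }
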
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 39e75131fd5a..9901057a15ba 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -495,7 +495,7 @@ static void cramfs_kill_sb(struct super_block *sb)
sb->s_mtd = NULL;
} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
sync_blockdev(sb->s_bdev);
- fput(sb->s_bdev_file);
+ bdev_fput(sb->s_bdev_file);
}
kfree(sbi);
}
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 69308fd73e4a..c0eb139adb07 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -430,7 +430,6 @@ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
switch (mode) {
case EROFS_MOUNT_DAX_ALWAYS:
- warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
set_opt(&ctx->opt, DAX_ALWAYS);
clear_opt(&ctx->opt, DAX_NEVER);
return true;
diff --git a/fs/exec.c b/fs/exec.c
index ff6f26671cfc..cf1df7f16e55 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -895,6 +895,7 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
goto out;
}
+ bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
*sp_location = sp;
out:
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cfb8449c731f..044135796f2b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -5668,7 +5668,7 @@ failed_mount:
brelse(sbi->s_sbh);
if (sbi->s_journal_bdev_file) {
invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
- fput(sbi->s_journal_bdev_file);
+ bdev_fput(sbi->s_journal_bdev_file);
}
out_fail:
invalidate_bdev(sb->s_bdev);
@@ -5913,7 +5913,7 @@ static struct file *ext4_get_journal_blkdev(struct super_block *sb,
out_bh:
brelse(bh);
out_bdev:
- fput(bdev_file);
+ bdev_fput(bdev_file);
return ERR_PTR(errno);
}
@@ -5952,7 +5952,7 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
out_journal:
jbd2_journal_destroy(journal);
out_bdev:
- fput(bdev_file);
+ bdev_fput(bdev_file);
return ERR_PTR(errno);
}
@@ -7327,7 +7327,7 @@ static void ext4_kill_sb(struct super_block *sb)
kill_block_super(sb);
if (bdev_file)
- fput(bdev_file);
+ bdev_fput(bdev_file);
}
static struct file_system_type ext4_fs_type = {
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a6867f26f141..a4bc26dfdb1a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1558,7 +1558,7 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
for (i = 0; i < sbi->s_ndevs; i++) {
if (i > 0)
- fput(FDEV(i).bdev_file);
+ bdev_fput(FDEV(i).bdev_file);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index b6cad106c37e..0b2da7b7e2ad 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -310,6 +310,10 @@ struct cuse_init_args {
/**
* cuse_process_init_reply - finish initializing CUSE channel
*
+ * @fm: The fuse mount information containing the CUSE connection.
+ * @args: The arguments passed to the init reply.
+ * @error: The error code indicating whether the init request failed.

+ *
* This function creates the character device and sets up all the
* required data structures for it. Please read the comment at the
* top of this file for high level overview.
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 4a6df591add6..2b0d4781f394 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1321,6 +1321,7 @@ retry:
err = fuse_do_statx(inode, file, stat);
if (err == -ENOSYS) {
fc->no_statx = 1;
+ err = 0;
goto retry;
}
} else {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a56e7bffd000..b57ce4157640 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1362,7 +1362,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
bool *exclusive)
{
struct inode *inode = file_inode(iocb->ki_filp);
- struct fuse_file *ff = iocb->ki_filp->private_data;
+ struct fuse_inode *fi = get_fuse_inode(inode);
*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
if (*exclusive) {
@@ -1377,7 +1377,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
* have raced, so check it again.
*/
if (fuse_io_past_eof(iocb, from) ||
- fuse_file_uncached_io_start(inode, ff, NULL) != 0) {
+ fuse_inode_uncached_io_start(fi, NULL) != 0) {
inode_unlock_shared(inode);
inode_lock(inode);
*exclusive = true;
@@ -1388,13 +1388,13 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
{
struct inode *inode = file_inode(iocb->ki_filp);
- struct fuse_file *ff = iocb->ki_filp->private_data;
+ struct fuse_inode *fi = get_fuse_inode(inode);
if (exclusive) {
inode_unlock(inode);
} else {
/* Allow opens in caching mode after last parallel dio end */
- fuse_file_uncached_io_end(inode, ff);
+ fuse_inode_uncached_io_end(fi);
inode_unlock_shared(inode);
}
}
@@ -2574,8 +2574,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
* First mmap of direct_io file enters caching inode io mode.
* Also waits for parallel dio writers to go into serial mode
* (exclusive instead of shared lock).
+ * After first mmap, the inode stays in caching io mode until
+ * the direct_io file release.
*/
- rc = fuse_file_cached_io_start(inode, ff);
+ rc = fuse_file_cached_io_open(inode, ff);
if (rc)
return rc;
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index b24084b60864..f23919610313 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -1394,9 +1394,10 @@ int fuse_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa);
/* iomode.c */
-int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff);
-int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struct fuse_backing *fb);
-void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff);
+int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff);
+int fuse_inode_uncached_io_start(struct fuse_inode *fi,
+ struct fuse_backing *fb);
+void fuse_inode_uncached_io_end(struct fuse_inode *fi);
int fuse_file_io_open(struct file *file, struct inode *inode);
void fuse_file_io_release(struct fuse_file *ff, struct inode *inode);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 3a5d88878335..99e44ea7d875 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -175,6 +175,7 @@ static void fuse_evict_inode(struct inode *inode)
}
}
if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
+ WARN_ON(fi->iocachectr != 0);
WARN_ON(!list_empty(&fi->write_files));
WARN_ON(!list_empty(&fi->queued_writes));
}
diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c
index c653ddcf0578..c99e285f3183 100644
--- a/fs/fuse/iomode.c
+++ b/fs/fuse/iomode.c
@@ -21,12 +21,13 @@ static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi)
}
/*
- * Start cached io mode.
+ * Called on cached file open() and on first mmap() of direct_io file.
+ * Takes cached_io inode mode reference to be dropped on file release.
*
* Blocks new parallel dio writes and waits for the in-progress parallel dio
* writes to complete.
*/
-int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
+int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff)
{
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -67,10 +68,9 @@ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
return 0;
}
-static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
+static void fuse_file_cached_io_release(struct fuse_file *ff,
+ struct fuse_inode *fi)
{
- struct fuse_inode *fi = get_fuse_inode(inode);
-
spin_lock(&fi->lock);
WARN_ON(fi->iocachectr <= 0);
WARN_ON(ff->iomode != IOM_CACHED);
@@ -82,16 +82,15 @@ static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
}
/* Start strictly uncached io mode where cache access is not allowed */
-int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struct fuse_backing *fb)
+int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb)
{
- struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_backing *oldfb;
int err = 0;
spin_lock(&fi->lock);
/* deny conflicting backing files on same fuse inode */
oldfb = fuse_inode_backing(fi);
- if (oldfb && oldfb != fb) {
+ if (fb && oldfb && oldfb != fb) {
err = -EBUSY;
goto unlock;
}
@@ -99,12 +98,10 @@ int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struc
err = -ETXTBSY;
goto unlock;
}
- WARN_ON(ff->iomode != IOM_NONE);
fi->iocachectr--;
- ff->iomode = IOM_UNCACHED;
/* fuse inode holds a single refcount of backing file */
- if (!oldfb) {
+ if (fb && !oldfb) {
oldfb = fuse_inode_backing_set(fi, fb);
WARN_ON_ONCE(oldfb != NULL);
} else {
@@ -115,15 +112,29 @@ unlock:
return err;
}
-void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
+/* Takes uncached_io inode mode reference to be dropped on file release */
+static int fuse_file_uncached_io_open(struct inode *inode,
+ struct fuse_file *ff,
+ struct fuse_backing *fb)
{
struct fuse_inode *fi = get_fuse_inode(inode);
+ int err;
+
+ err = fuse_inode_uncached_io_start(fi, fb);
+ if (err)
+ return err;
+
+ WARN_ON(ff->iomode != IOM_NONE);
+ ff->iomode = IOM_UNCACHED;
+ return 0;
+}
+
+void fuse_inode_uncached_io_end(struct fuse_inode *fi)
+{
struct fuse_backing *oldfb = NULL;
spin_lock(&fi->lock);
WARN_ON(fi->iocachectr >= 0);
- WARN_ON(ff->iomode != IOM_UNCACHED);
- ff->iomode = IOM_NONE;
fi->iocachectr++;
if (!fi->iocachectr) {
wake_up(&fi->direct_io_waitq);
@@ -134,6 +145,15 @@ void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
fuse_backing_put(oldfb);
}
+/* Drop uncached_io reference from passthrough open */
+static void fuse_file_uncached_io_release(struct fuse_file *ff,
+ struct fuse_inode *fi)
+{
+ WARN_ON(ff->iomode != IOM_UNCACHED);
+ ff->iomode = IOM_NONE;
+ fuse_inode_uncached_io_end(fi);
+}
+
/*
* Open flags that are allowed in combination with FOPEN_PASSTHROUGH.
* A combination of FOPEN_PASSTHROUGH and FOPEN_DIRECT_IO means that read/write
@@ -163,7 +183,7 @@ static int fuse_file_passthrough_open(struct inode *inode, struct file *file)
return PTR_ERR(fb);
/* First passthrough file open denies caching inode io mode */
- err = fuse_file_uncached_io_start(inode, ff, fb);
+ err = fuse_file_uncached_io_open(inode, ff, fb);
if (!err)
return 0;
@@ -216,7 +236,7 @@ int fuse_file_io_open(struct file *file, struct inode *inode)
if (ff->open_flags & FOPEN_PASSTHROUGH)
err = fuse_file_passthrough_open(inode, file);
else
- err = fuse_file_cached_io_start(inode, ff);
+ err = fuse_file_cached_io_open(inode, ff);
if (err)
goto fail;
@@ -236,8 +256,10 @@ fail:
/* No more pending io and no new io possible to inode via open/mmapped file */
void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
/*
- * Last parallel dio close allows caching inode io mode.
+ * Last passthrough file close allows caching inode io mode.
* Last caching file close exits caching inode io mode.
*/
switch (ff->iomode) {
@@ -245,10 +267,10 @@ void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
/* Nothing to do */
break;
case IOM_UNCACHED:
- fuse_file_uncached_io_end(inode, ff);
+ fuse_file_uncached_io_release(ff, fi);
break;
case IOM_CACHED:
- fuse_file_cached_io_end(inode, ff);
+ fuse_file_cached_io_release(ff, fi);
break;
}
}
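
The iomode refactor above preserves the fi->iocachectr sign convention: the counter is positive while cached-io references are held, negative while uncached (passthrough/parallel-dio) references are held, and the two modes exclude each other; the helpers now operate on the inode rather than the file. A toy model of that convention (illustrative names, not the fuse API):

    #include <stdio.h>

    static int iocachectr;

    static int cached_io_start(void)
    {
        if (iocachectr < 0)
            return -1;      /* uncached io in progress; real code may wait */
        iocachectr++;
        return 0;
    }

    static int uncached_io_start(void)
    {
        if (iocachectr > 0)
            return -1;      /* -ETXTBSY in the real code */
        iocachectr--;
        return 0;
    }

    int main(void)
    {
        cached_io_start();
        printf("uncached while cached held: %d\n", uncached_io_start()); /* -1 */
        return 0;
    }
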
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 789af5c8fade..aa1626955b2c 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1718,7 +1718,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
struct buffer_head *dibh, *bh;
struct gfs2_holder rd_gh;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
- u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
+ unsigned int bsize = 1 << bsize_shift;
+ u64 lblock = (offset + bsize - 1) >> bsize_shift;
__u16 start_list[GFS2_MAX_META_HEIGHT];
__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
unsigned int start_aligned, end_aligned;
@@ -1729,7 +1730,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
u64 prev_bnr = 0;
__be64 *start, *end;
- if (offset >= maxsize) {
+ if (offset + bsize - 1 >= maxsize) {
/*
* The starting point lies beyond the allocated metadata;
* there are no blocks to deallocate.
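
The punch_hole() fix rounds the start offset up to a block boundary before comparing against the metadata size; the old offset >= maxsize test let an offset inside the final partial block through, producing an lblock one past the last allocated block. A worked example with 4 KiB blocks:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int bsize_shift = 12;          /* 4 KiB blocks */
        unsigned int bsize = 1u << bsize_shift;
        uint64_t maxsize = 8192;                /* metadata covers blocks 0-1 */
        uint64_t offset = 8000;                 /* inside the last block */

        uint64_t lblock = (offset + bsize - 1) >> bsize_shift;

        /* Old check: 8000 >= 8192 is false, yet lblock == 2 points past
         * the last allocated block. The rounded-up check catches it. */
        printf("lblock=%llu old_check=%d new_check=%d\n",
               (unsigned long long)lblock,
               offset >= maxsize,
               offset + bsize - 1 >= maxsize);
        return 0;
    }
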
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 1d5abfdf0f22..fb0628e680c4 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -769,7 +769,7 @@ static int ioctl_getfsuuid(struct file *file, void __user *argp)
struct fsuuid2 u = { .len = sb->s_uuid_len, };
if (!sb->s_uuid_len)
- return -ENOIOCTLCMD;
+ return -ENOTTY;
memcpy(&u.uuid[0], &sb->s_uuid, sb->s_uuid_len);
@@ -781,7 +781,7 @@ static int ioctl_get_fs_sysfs_path(struct file *file, void __user *argp)
struct super_block *sb = file_inode(file)->i_sb;
if (!strlen(sb->s_sysfs_name))
- return -ENOIOCTLCMD;
+ return -ENOTTY;
struct fs_sysfs_path u = {};
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 73389c68e251..9609349e92e5 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1141,7 +1141,7 @@ journal_found:
lbmLogShutdown(log);
close: /* close external log device */
- fput(bdev_file);
+ bdev_fput(bdev_file);
free: /* free log descriptor */
mutex_unlock(&jfs_log_mutex);
@@ -1485,7 +1485,7 @@ int lmLogClose(struct super_block *sb)
bdev_file = log->bdev_file;
rc = lmLogShutdown(log);
- fput(bdev_file);
+ bdev_fput(bdev_file);
kfree(log);
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e9df2f87072c..8502ef68459b 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -636,11 +636,18 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
* each file a separate locking class. Let's differentiate on
* whether the file has mmap or not for now.
*
- * Both paths of the branch look the same. They're supposed to
+ * For similar reasons, writable and readonly files are given different
+	 * lockdep keys, because the writable file /sys/power/resume may call vfs
+	 * lookup helpers for arbitrary paths and readonly files can be read by
+	 * overlayfs from vfs helpers when sysfs is a lower layer of overlayfs.
+ *
+ * All three cases look the same. They're supposed to
* look that way and give @of->mutex different static lockdep keys.
*/
if (has_mmap)
mutex_init(&of->mutex);
+ else if (file->f_mode & FMODE_WRITE)
+ mutex_init(&of->mutex);
else
mutex_init(&of->mutex);
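
The three branches above are textually identical on purpose: in the kernel, mutex_init() is a macro that defines a distinct static lockdep key at each expansion site, so each branch yields its own lock class. A userspace sketch of the per-call-site static idiom (KEYED_INIT() is a made-up stand-in, not the kernel macro):

    #include <stdio.h>

    struct key { const char *site; };

    /* Each expansion defines its own static object, so each call site
     * gets a unique address, just like lockdep's per-site keys. */
    #define KEYED_INIT(name) do {                                \
        static struct key __key = { .site = name };              \
        printf("class %p (%s)\n", (void *)&__key, __key.site);   \
    } while (0)

    int main(void)
    {
        int has_mmap = 0, writable = 1;

        if (has_mmap)
            KEYED_INIT("mmap");        /* distinct static key #1 */
        else if (writable)
            KEYED_INIT("writable");    /* distinct static key #2 */
        else
            KEYED_INIT("readonly");    /* distinct static key #3 */
        return 0;
    }
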
diff --git a/fs/namei.c b/fs/namei.c
index ceb9ddf8dfdd..c5b2a25be7d0 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -4050,6 +4050,8 @@ retry:
case 0: case S_IFREG:
error = vfs_create(idmap, path.dentry->d_inode,
dentry, mode, true);
+ if (!error)
+ security_path_post_mknod(idmap, dentry);
break;
case S_IFCHR: case S_IFBLK:
error = vfs_mknod(idmap, path.dentry->d_inode,
@@ -4060,11 +4062,6 @@ retry:
dentry, mode, 0);
break;
}
-
- if (error)
- goto out2;
-
- security_path_post_mknod(idmap, dentry);
out2:
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 9a0d32e4b422..267b622d923b 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -164,7 +164,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
enum netfs_how_to_modify howto;
enum netfs_folio_trace trace;
unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
- ssize_t written = 0, ret;
+ ssize_t written = 0, ret, ret2;
loff_t i_size, pos = iocb->ki_pos, from, to;
size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
bool maybe_trouble = false;
@@ -172,15 +172,14 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
) {
- if (pos < i_size_read(inode)) {
- ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
- if (ret < 0) {
- goto out;
- }
- }
-
wbc_attach_fdatawrite_inode(&wbc, mapping->host);
+ ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
+ if (ret < 0) {
+ wbc_detach_inode(&wbc);
+ goto out;
+ }
+
wreq = netfs_begin_writethrough(iocb, iter->count);
if (IS_ERR(wreq)) {
wbc_detach_inode(&wbc);
@@ -395,10 +394,12 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
out:
if (unlikely(wreq)) {
- ret = netfs_end_writethrough(wreq, iocb);
+ ret2 = netfs_end_writethrough(wreq, iocb);
wbc_detach_inode(&wbc);
- if (ret == -EIOCBQUEUED)
- return ret;
+ if (ret2 == -EIOCBQUEUED)
+ return ret2;
+ if (ret == 0)
+ ret = ret2;
}
iocb->ki_pos += written;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 87c9547989f6..e88aca0c6e8e 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -983,15 +983,7 @@ static struct workqueue_struct *callback_wq;
static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
{
trace_nfsd_cb_queue(cb->cb_clp, cb);
- return queue_delayed_work(callback_wq, &cb->cb_work, 0);
-}
-
-static void nfsd4_queue_cb_delayed(struct nfsd4_callback *cb,
- unsigned long msecs)
-{
- trace_nfsd_cb_queue(cb->cb_clp, cb);
- queue_delayed_work(callback_wq, &cb->cb_work,
- msecs_to_jiffies(msecs));
+ return queue_work(callback_wq, &cb->cb_work);
}
static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
@@ -1490,7 +1482,7 @@ static void
nfsd4_run_cb_work(struct work_struct *work)
{
struct nfsd4_callback *cb =
- container_of(work, struct nfsd4_callback, cb_work.work);
+ container_of(work, struct nfsd4_callback, cb_work);
struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt;
int flags;
@@ -1502,16 +1494,8 @@ nfsd4_run_cb_work(struct work_struct *work)
clnt = clp->cl_cb_client;
if (!clnt) {
- if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
- nfsd41_destroy_cb(cb);
- else {
- /*
- * XXX: Ideally, we could wait for the client to
- * reconnect, but I haven't figured out how
- * to do that yet.
- */
- nfsd4_queue_cb_delayed(cb, 25);
- }
+ /* Callback channel broken, or client killed; give up: */
+ nfsd41_destroy_cb(cb);
return;
}
@@ -1544,7 +1528,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_msg.rpc_argp = cb;
cb->cb_msg.rpc_resp = cb;
cb->cb_ops = ops;
- INIT_DELAYED_WORK(&cb->cb_work, nfsd4_run_cb_work);
+ INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
cb->cb_status = 0;
cb->cb_need_restart = false;
cb->cb_holds_slot = false;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 1a93c7fcf76c..84d4093ca713 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3042,12 +3042,9 @@ static void
nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
- struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- spin_lock(&nn->client_lock);
clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
- put_client_renew_locked(clp);
- spin_unlock(&nn->client_lock);
+ drop_client(clp);
}
static int
@@ -3831,15 +3828,20 @@ nfsd4_create_session(struct svc_rqst *rqstp,
else
cs_slot = &unconf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
- if (status) {
- if (status == nfserr_replay_cache) {
- status = nfsd4_replay_create_session(cr_ses, cs_slot);
- goto out_free_conn;
- }
+ switch (status) {
+ case nfs_ok:
+ cs_slot->sl_seqid++;
+ cr_ses->seqid = cs_slot->sl_seqid;
+ break;
+ case nfserr_replay_cache:
+ status = nfsd4_replay_create_session(cr_ses, cs_slot);
+ fallthrough;
+ case nfserr_jukebox:
+ /* The server MUST NOT cache NFS4ERR_DELAY */
+ goto out_free_conn;
+ default:
goto out_cache_error;
}
- cs_slot->sl_seqid++;
- cr_ses->seqid = cs_slot->sl_seqid;
/* RFC 8881 Section 18.36.4 Phase 3: Client ID confirmation. */
if (conf) {
@@ -3859,10 +3861,8 @@ nfsd4_create_session(struct svc_rqst *rqstp,
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = mark_client_expired_locked(old);
- if (status) {
- old = NULL;
- goto out_cache_error;
- }
+ if (status)
+ goto out_expired_error;
trace_nfsd_clid_replaced(&old->cl_clientid);
}
move_to_confirmed(unconf);
@@ -3894,6 +3894,17 @@ nfsd4_create_session(struct svc_rqst *rqstp,
expire_client(old);
return status;
+out_expired_error:
+ old = NULL;
+ /*
+ * Revert the slot seq_nr change so the server will process
+ * the client's resend instead of returning a cached response.
+ */
+ if (status == nfserr_jukebox) {
+ cs_slot->sl_seqid--;
+ cr_ses->seqid = cs_slot->sl_seqid;
+ goto out_free_conn;
+ }
out_cache_error:
nfsd4_cache_create_session(cr_ses, cs_slot, status);
out_free_conn:
@@ -6602,7 +6613,7 @@ deleg_reaper(struct nfsd_net *nn)
list_add(&clp->cl_ra_cblist, &cblist);
/* release in nfsd4_cb_recall_any_release */
- atomic_inc(&clp->cl_rpc_users);
+ kref_get(&clp->cl_nfsdfs.cl_ref);
set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
clp->cl_ra_time = ktime_get_boottime_seconds();
}
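
The CREATE_SESSION rework bumps the slot seqid only on nfs_ok, replays from the slot cache on a repeated seqid, and deliberately never caches NFS4ERR_DELAY so the client's retry is processed fresh. A toy slot model of those rules (simplified; not the nfsd types):

    #include <stdio.h>

    enum status { OK, REPLAY, DELAY };

    struct slot { unsigned int seqid; enum status cached; };

    static enum status handle(struct slot *sl, unsigned int seqid,
                              enum status result)
    {
        if (seqid == sl->seqid)
            return sl->cached;      /* replay: return cached response */
        if (result == DELAY)
            return DELAY;           /* MUST NOT cache; seqid unchanged */
        sl->seqid = seqid;          /* accept and cache */
        sl->cached = result;
        return result;
    }

    int main(void)
    {
        struct slot sl = { 0, OK };

        printf("%d\n", handle(&sl, 1, DELAY)); /* 2: delayed, not cached */
        printf("%d\n", handle(&sl, 1, OK));    /* 0: retry processed fresh */
        printf("%d\n", handle(&sl, 1, OK));    /* 0: replayed from cache */
        return 0;
    }
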
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index fac938f563ad..1955481832e0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3490,11 +3490,13 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
struct dentry *dentry, const u32 *bmval,
int ignore_crossmnt)
{
+ DECLARE_BITMAP(attr_bitmap, ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
struct nfsd4_fattr_args args;
struct svc_fh *tempfh = NULL;
int starting_len = xdr->buf->len;
__be32 *attrlen_p, status;
int attrlen_offset;
+ u32 attrmask[3];
int err;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
@@ -3502,10 +3504,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
- union {
- u32 attrmask[3];
- unsigned long mask[2];
- } u;
unsigned long bit;
bool file_modified = false;
u64 size = 0;
@@ -3521,20 +3519,19 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
/*
* Make a local copy of the attribute bitmap that can be modified.
*/
- memset(&u, 0, sizeof(u));
- u.attrmask[0] = bmval[0];
- u.attrmask[1] = bmval[1];
- u.attrmask[2] = bmval[2];
+ attrmask[0] = bmval[0];
+ attrmask[1] = bmval[1];
+ attrmask[2] = bmval[2];
args.rdattr_err = 0;
if (exp->ex_fslocs.migrated) {
- status = fattr_handle_absent_fs(&u.attrmask[0], &u.attrmask[1],
- &u.attrmask[2], &args.rdattr_err);
+ status = fattr_handle_absent_fs(&attrmask[0], &attrmask[1],
+ &attrmask[2], &args.rdattr_err);
if (status)
goto out;
}
args.size = 0;
- if (u.attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+ if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry),
&file_modified, &size);
if (status)
@@ -3553,16 +3550,16 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (!(args.stat.result_mask & STATX_BTIME))
/* underlying FS does not offer btime so we can't share it */
- u.attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
- if ((u.attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
+ attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
+ if ((attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
- (u.attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+ (attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
err = vfs_statfs(&path, &args.statfs);
if (err)
goto out_nfserr;
}
- if ((u.attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
+ if ((attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
!fhp) {
tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
status = nfserr_jukebox;
@@ -3577,10 +3574,10 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
args.fhp = fhp;
args.acl = NULL;
- if (u.attrmask[0] & FATTR4_WORD0_ACL) {
+ if (attrmask[0] & FATTR4_WORD0_ACL) {
err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
if (err == -EOPNOTSUPP)
- u.attrmask[0] &= ~FATTR4_WORD0_ACL;
+ attrmask[0] &= ~FATTR4_WORD0_ACL;
else if (err == -EINVAL) {
status = nfserr_attrnotsupp;
goto out;
@@ -3592,17 +3589,17 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
args.context = NULL;
- if ((u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
- u.attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+ if ((attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
+ attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
err = security_inode_getsecctx(d_inode(dentry),
&args.context, &args.contextlen);
else
err = -EOPNOTSUPP;
args.contextsupport = (err == 0);
- if (u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
+ if (attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
if (err == -EOPNOTSUPP)
- u.attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
else if (err)
goto out_nfserr;
}
@@ -3610,8 +3607,8 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
/* attrmask */
- status = nfsd4_encode_bitmap4(xdr, u.attrmask[0],
- u.attrmask[1], u.attrmask[2]);
+ status = nfsd4_encode_bitmap4(xdr, attrmask[0], attrmask[1],
+ attrmask[2]);
if (status)
goto out;
@@ -3620,7 +3617,9 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
if (!attrlen_p)
goto out_resource;
- for_each_set_bit(bit, (const unsigned long *)&u.mask,
+ bitmap_from_arr32(attr_bitmap, attrmask,
+ ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
+ for_each_set_bit(bit, attr_bitmap,
ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops)) {
status = nfsd4_enc_fattr4_encode_ops[bit](xdr, &args);
if (status != nfs_ok)
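
The xdr change drops the union that reinterpreted three u32 words as unsigned longs (a trick that misreads bit positions on 64-bit big-endian) in favor of bitmap_from_arr32(). A portable userspace sketch of the same u32-array-to-bitmap conversion, simplified from the kernel helper:

    #include <stdio.h>

    /* Bit i of arr[i/32] becomes bit i of the bitmap, regardless of
     * sizeof(long) or endianness. Simplified sketch. */
    static void from_arr32(unsigned long *bitmap, const unsigned int *arr,
                           unsigned int nbits)
    {
        for (unsigned int i = 0; i < nbits; i++)
            if (arr[i / 32] & (1u << (i % 32)))
                bitmap[i / (8 * sizeof(long))] |=
                    1ul << (i % (8 * sizeof(long)));
    }

    int main(void)
    {
        unsigned int attrmask[3] = { 0x9, 0x0, 0x1 }; /* bits 0, 3, 64 */
        unsigned long bitmap[(96 + 8 * sizeof(long) - 1) /
                             (8 * sizeof(long))] = { 0 };

        from_arr32(bitmap, attrmask, 96);
        for (size_t w = 0; w < sizeof(bitmap) / sizeof(bitmap[0]); w++)
            printf("word %zu: 0x%lx\n", w, bitmap[w]);
        return 0;
    }
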
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 01c6f3445646..2ed0fcf879fd 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -68,7 +68,7 @@ struct nfsd4_callback {
struct nfs4_client *cb_clp;
struct rpc_message cb_msg;
const struct nfsd4_callback_ops *cb_ops;
- struct delayed_work cb_work;
+ struct work_struct cb_work;
int cb_seq_status;
int cb_status;
bool cb_need_restart;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6a9464262fae..2e41eb4c3cec 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1852,7 +1852,7 @@ retry:
trap = lock_rename(tdentry, fdentry);
if (IS_ERR(trap)) {
err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
- goto out;
+ goto out_want_write;
}
err = fh_fill_pre_attrs(ffhp);
if (err != nfs_ok)
@@ -1922,6 +1922,7 @@ retry:
}
out_unlock:
unlock_rename(tdentry, fdentry);
+out_want_write:
fh_drop_write(ffhp);
/*
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index bc846b904b68..aee40db7a036 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -240,7 +240,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
#define S_SHIFT 12
static unsigned char
-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
[S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = NILFS_FT_DIR,
[S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV,
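
The nilfs2 fix adds one slot to the lookup table: (mode & S_IFMT) >> S_SHIFT can evaluate to 15, while an array declared [S_IFMT >> S_SHIFT] only has valid indices 0..14, so a corrupted mode could index one past the end. A two-line demonstration of the arithmetic:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        /* S_IFMT is 0170000, so the four type bits shift down to 0..15.
         * Without the "+ 1" the array has 15 slots but index 15 is
         * reachable. */
        printf("array size without fix: %d\n", S_IFMT >> 12);        /* 15 */
        printf("largest possible index: %d\n", (0170000 & S_IFMT) >> 12); /* 15 */
        return 0;
    }
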
diff --git a/fs/ntfs3/Kconfig b/fs/ntfs3/Kconfig
index cdfdf51e55d7..7bc31d69f680 100644
--- a/fs/ntfs3/Kconfig
+++ b/fs/ntfs3/Kconfig
@@ -46,3 +46,12 @@ config NTFS3_FS_POSIX_ACL
NOTE: this is a Linux-only feature. Windows will ignore these ACLs.
If you don't know what Access Control Lists are, say N.
+
+config NTFS_FS
+ tristate "NTFS file system support"
+ select NTFS3_FS
+ select BUFFER_HEAD
+ select NLS
+ help
+	  This config option is here only for backward compatibility. The NTFS
+	  filesystem is now handled by the NTFS3 driver.
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index 5cf3d9decf64..263635199b60 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -616,4 +616,11 @@ const struct file_operations ntfs_dir_operations = {
.compat_ioctl = ntfs_compat_ioctl,
#endif
};
+
+const struct file_operations ntfs_legacy_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = ntfs_readdir,
+ .open = ntfs_file_open,
+};
// clang-format on
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 5418662c80d8..b73969e05052 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -1236,4 +1236,12 @@ const struct file_operations ntfs_file_operations = {
.fallocate = ntfs_fallocate,
.release = ntfs_file_release,
};
+
+const struct file_operations ntfs_legacy_file_operations = {
+ .llseek = generic_file_llseek,
+ .read_iter = ntfs_file_read_iter,
+ .splice_read = ntfs_file_splice_read,
+ .open = ntfs_file_open,
+ .release = ntfs_file_release,
+};
// clang-format on
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index eb7a8c9fba01..d273eda1cf45 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -440,7 +440,10 @@ end_enum:
* Usually a hard links to directories are disabled.
*/
inode->i_op = &ntfs_dir_inode_operations;
- inode->i_fop = &ntfs_dir_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_dir_operations;
+ else
+ inode->i_fop = &ntfs_dir_operations;
ni->i_valid = 0;
} else if (S_ISLNK(mode)) {
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
@@ -450,7 +453,10 @@ end_enum:
} else if (S_ISREG(mode)) {
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
inode->i_op = &ntfs_file_inode_operations;
- inode->i_fop = &ntfs_file_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_file_operations;
+ else
+ inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
&ntfs_aops;
if (ino != MFT_REC_MFT)
@@ -1614,7 +1620,10 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
if (S_ISDIR(mode)) {
inode->i_op = &ntfs_dir_inode_operations;
- inode->i_fop = &ntfs_dir_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_dir_operations;
+ else
+ inode->i_fop = &ntfs_dir_operations;
} else if (S_ISLNK(mode)) {
inode->i_op = &ntfs_link_inode_operations;
inode->i_fop = NULL;
@@ -1623,7 +1632,10 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
inode_nohighmem(inode);
} else if (S_ISREG(mode)) {
inode->i_op = &ntfs_file_inode_operations;
- inode->i_fop = &ntfs_file_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_file_operations;
+ else
+ inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
&ntfs_aops;
init_rwsem(&ni->file.run_lock);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 79356fd29a14..5f4d288c6adf 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -493,6 +493,7 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
struct ntfs_fnd *fnd);
bool dir_is_empty(struct inode *dir);
extern const struct file_operations ntfs_dir_operations;
+extern const struct file_operations ntfs_legacy_dir_operations;
/* Globals from file.c */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
@@ -507,6 +508,7 @@ long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg);
extern const struct inode_operations ntfs_special_inode_operations;
extern const struct inode_operations ntfs_file_inode_operations;
extern const struct file_operations ntfs_file_operations;
+extern const struct file_operations ntfs_legacy_file_operations;
/* Globals from frecord.c */
void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi);
@@ -1154,4 +1156,6 @@ static inline void le64_sub_cpu(__le64 *var, u64 val)
*var = cpu_to_le64(le64_to_cpu(*var) - val);
}
+bool is_legacy_ntfs(struct super_block *sb);
+
#endif /* _LINUX_NTFS3_NTFS_FS_H */
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 9df7c20d066f..b26d95a8d327 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -408,6 +408,12 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
struct ntfs_mount_options *new_opts = fc->fs_private;
int ro_rw;
+	/* If ntfs3 is used as legacy ntfs, enforce read-only mode. */
+ if (is_legacy_ntfs(sb)) {
+ fc->sb_flags |= SB_RDONLY;
+ goto out;
+ }
+
ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY);
if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
errorf(fc,
@@ -427,8 +433,6 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
fc,
"ntfs3: Cannot use different iocharset when remounting!");
- sync_filesystem(sb);
-
if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
!new_opts->force) {
errorf(fc,
@@ -436,6 +440,8 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
return -EINVAL;
}
+out:
+ sync_filesystem(sb);
swap(sbi->options, fc->fs_private);
return 0;
@@ -1613,6 +1619,8 @@ load_root:
}
#endif
+ if (is_legacy_ntfs(sb))
+ sb->s_flags |= SB_RDONLY;
return 0;
put_inode_out:
@@ -1730,7 +1738,7 @@ static const struct fs_context_operations ntfs_context_ops = {
* This will be called on mount/remount. We will first initialize
* options so that if remount we can use just that.
*/
-static int ntfs_init_fs_context(struct fs_context *fc)
+static int __ntfs_init_fs_context(struct fs_context *fc)
{
struct ntfs_mount_options *opts;
struct ntfs_sb_info *sbi;
@@ -1778,6 +1786,11 @@ free_opts:
return -ENOMEM;
}
+static int ntfs_init_fs_context(struct fs_context *fc)
+{
+ return __ntfs_init_fs_context(fc);
+}
+
static void ntfs3_kill_sb(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
@@ -1798,6 +1811,50 @@ static struct file_system_type ntfs_fs_type = {
.kill_sb = ntfs3_kill_sb,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
+
+#if IS_ENABLED(CONFIG_NTFS_FS)
+static int ntfs_legacy_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+
+ ret = __ntfs_init_fs_context(fc);
+	/* If ntfs3 is used as legacy ntfs, enforce read-only mode. */
+ fc->sb_flags |= SB_RDONLY;
+ return ret;
+}
+
+static struct file_system_type ntfs_legacy_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ntfs",
+ .init_fs_context = ntfs_legacy_init_fs_context,
+ .parameters = ntfs_fs_parameters,
+ .kill_sb = ntfs3_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+};
+MODULE_ALIAS_FS("ntfs");
+
+static inline void register_as_ntfs_legacy(void)
+{
+ int err = register_filesystem(&ntfs_legacy_fs_type);
+ if (err)
+ pr_warn("ntfs3: Failed to register legacy ntfs filesystem driver: %d\n", err);
+}
+
+static inline void unregister_as_ntfs_legacy(void)
+{
+ unregister_filesystem(&ntfs_legacy_fs_type);
+}
+bool is_legacy_ntfs(struct super_block *sb)
+{
+ return sb->s_type == &ntfs_legacy_fs_type;
+}
+#else
+static inline void register_as_ntfs_legacy(void) {}
+static inline void unregister_as_ntfs_legacy(void) {}
+bool is_legacy_ntfs(struct super_block *sb) { return false; }
+#endif
+
// clang-format on
static int __init init_ntfs_fs(void)
@@ -1832,6 +1889,7 @@ static int __init init_ntfs_fs(void)
goto out1;
}
+ register_as_ntfs_legacy();
err = register_filesystem(&ntfs_fs_type);
if (err)
goto out;
@@ -1849,6 +1907,7 @@ static void __exit exit_ntfs_fs(void)
rcu_barrier();
kmem_cache_destroy(ntfs_inode_cachep);
unregister_filesystem(&ntfs_fs_type);
+ unregister_as_ntfs_legacy();
ntfs3_exit_bitmap();
#ifdef CONFIG_PROC_FS
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index bd08616ed8ba..7b4db9c56e6a 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -5,7 +5,7 @@
obj-y += proc.o
-CFLAGS_task_mmu.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_task_mmu.o += -Wno-override-init
proc-y := nommu.o task_nommu.o
proc-$(CONFIG_MMU) := task_mmu.o
diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c
index 902b326e1e56..87dcaae32ff8 100644
--- a/fs/proc/bootconfig.c
+++ b/fs/proc/bootconfig.c
@@ -62,12 +62,12 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size)
break;
dst += ret;
}
- if (ret >= 0 && boot_command_line[0]) {
- ret = snprintf(dst, rest(dst, end), "# Parameters from bootloader:\n# %s\n",
- boot_command_line);
- if (ret > 0)
- dst += ret;
- }
+ }
+ if (cmdline_has_extra_options() && ret >= 0 && boot_command_line[0]) {
+ ret = snprintf(dst, rest(dst, end), "# Parameters from bootloader:\n# %s\n",
+ boot_command_line);
+ if (ret > 0)
+ dst += ret;
}
out:
kfree(key);
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 195b077c0fac..9223856c934b 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -67,7 +67,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
*/
ppage = pfn_to_online_page(pfn);
- if (!ppage || PageSlab(ppage) || page_has_type(ppage))
+ if (!ppage)
pcount = 0;
else
pcount = page_mapcount(ppage);
@@ -124,11 +124,8 @@ u64 stable_page_flags(struct page *page)
/*
* pseudo flags for the well known (anonymous) memory mapped pages
- *
- * Note that page->_mapcount is overloaded in SLAB, so the
- * simple test in page_mapped() is not enough.
*/
- if (!PageSlab(page) && page_mapped(page))
+ if (page_mapped(page))
u |= 1 << KPF_MMAP;
if (PageAnon(page))
u |= 1 << KPF_ANON;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 6474529c4253..e539ccd39e1e 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2589,7 +2589,7 @@ static void journal_list_init(struct super_block *sb)
static void release_journal_dev(struct reiserfs_journal *journal)
{
if (journal->j_bdev_file) {
- fput(journal->j_bdev_file);
+ bdev_fput(journal->j_bdev_file);
journal->j_bdev_file = NULL;
}
}
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 2be227532f39..2cbb92462074 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -594,7 +594,7 @@ static void romfs_kill_sb(struct super_block *sb)
#ifdef CONFIG_ROMFS_ON_BLOCK
if (sb->s_bdev) {
sync_blockdev(sb->s_bdev);
- fput(sb->s_bdev_file);
+ bdev_fput(sb->s_bdev_file);
}
#endif
}
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index a0017724d523..0ff2491c311d 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -417,6 +417,7 @@ smb2_close_cached_fid(struct kref *ref)
{
struct cached_fid *cfid = container_of(ref, struct cached_fid,
refcount);
+ int rc;
spin_lock(&cfid->cfids->cfid_list_lock);
if (cfid->on_list) {
@@ -430,9 +431,10 @@ smb2_close_cached_fid(struct kref *ref)
cfid->dentry = NULL;
if (cfid->is_open) {
- SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
cfid->fid.volatile_fid);
- atomic_dec(&cfid->tcon->num_remote_opens);
+ if (rc) /* should we retry on -EBUSY or -EAGAIN? */
+ cifs_dbg(VFS, "close cached dir rc %d\n", rc);
}
free_cached_dir(cfid);
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index 226d4835c92d..c71ae5c04306 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -250,6 +250,8 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock);
list_for_each_entry(cfile, &tcon->openFileList, tlist) {
@@ -676,6 +678,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
}
#endif /* CONFIG_CIFS_STATS2 */
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
atomic_set(&tcon->num_smbs_sent, 0);
spin_lock(&tcon->stat_lock);
@@ -755,6 +759,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
}
#endif /* STATS2 */
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
i++;
seq_printf(m, "\n%d) %s", i, tcon->tree_name);
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index aa6f1ecb7c0e..39277c37185c 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -156,6 +156,7 @@ struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
+struct workqueue_struct *serverclose_wq;
__u32 cifs_lock_secret;
/*
@@ -388,6 +389,7 @@ cifs_alloc_inode(struct super_block *sb)
* server, can not assume caching of file data or metadata.
*/
cifs_set_oplock_level(cifs_inode, 0);
+ cifs_inode->lease_granted = false;
cifs_inode->flags = 0;
spin_lock_init(&cifs_inode->writers_lock);
cifs_inode->writers = 0;
@@ -738,6 +740,8 @@ static void cifs_umount_begin(struct super_block *sb)
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&tcon->tc_lock);
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_umount);
if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
/* we have other mounts to same share or we have
already tried to umount this and woken up
@@ -1888,6 +1892,13 @@ init_cifs(void)
goto out_destroy_cifsoplockd_wq;
}
+ serverclose_wq = alloc_workqueue("serverclose",
+ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ if (!serverclose_wq) {
+ rc = -ENOMEM;
+ goto out_destroy_serverclose_wq;
+ }
+
rc = cifs_init_inodecache();
if (rc)
goto out_destroy_deferredclose_wq;
@@ -1962,6 +1973,8 @@ out_destroy_decrypt_wq:
destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
destroy_workqueue(cifsiod_wq);
+out_destroy_serverclose_wq:
+ destroy_workqueue(serverclose_wq);
out_clean_proc:
cifs_proc_clean();
return rc;
@@ -1991,6 +2004,7 @@ exit_cifs(void)
destroy_workqueue(cifsoplockd_wq);
destroy_workqueue(decrypt_wq);
destroy_workqueue(fileinfo_put_wq);
+ destroy_workqueue(serverclose_wq);
destroy_workqueue(cifsiod_wq);
cifs_proc_clean();
}
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 7ed9d05f6890..6ff35570db81 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -442,10 +442,10 @@ struct smb_version_operations {
/* set fid protocol-specific info */
void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32);
/* close a file */
- void (*close)(const unsigned int, struct cifs_tcon *,
+ int (*close)(const unsigned int, struct cifs_tcon *,
struct cifs_fid *);
/* close a file, returning file attributes and timestamps */
- void (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
+ int (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *pfile_info);
/* send a flush request to the server */
int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *);
@@ -1077,6 +1077,7 @@ struct cifs_ses {
and after mount option parsing we fill it */
char *domainName;
char *password;
+	char *password2; /* When key rotation is used, the new password may be set before the old one expires */
char workstation_name[CIFS_MAX_WORKSTATION_LEN];
struct session_key auth_key;
struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
@@ -1189,6 +1190,7 @@ struct cifs_fattr {
*/
struct cifs_tcon {
struct list_head tcon_list;
+ int debug_id; /* Debugging for tracing */
int tc_count;
struct list_head rlist; /* reconnect list */
spinlock_t tc_lock; /* protect anything here that is not protected */
@@ -1275,13 +1277,14 @@ struct cifs_tcon {
__u32 max_cached_dirs;
#ifdef CONFIG_CIFS_FSCACHE
u64 resource_id; /* server resource id */
+ bool fscache_acquired; /* T if we've tried acquiring a cookie */
struct fscache_volume *fscache; /* cookie for share */
+	struct mutex fscache_lock; /* Prevent re-acquiring a cookie */
#endif
struct list_head pending_opens; /* list of incomplete opens */
struct cached_fids *cfids;
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
- struct list_head dfs_ses_list;
struct delayed_work dfs_cache_work;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
@@ -1440,6 +1443,7 @@ struct cifsFileInfo {
bool swapfile:1;
bool oplock_break_cancelled:1;
bool status_file_deleted:1; /* file has been deleted */
+ bool offload:1; /* offload final part of _put to a wq */
unsigned int oplock_epoch; /* epoch from the lease break */
__u32 oplock_level; /* oplock/lease level from the lease break */
int count;
@@ -1448,6 +1452,7 @@ struct cifsFileInfo {
struct cifs_search_info srch_inf;
struct work_struct oplock_break; /* work for oplock breaks */
struct work_struct put; /* work for the final part of _put */
+ struct work_struct serverclose; /* work for serverclose */
struct delayed_work deferred;
bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
char *symlink_target;
@@ -1804,7 +1809,6 @@ struct cifs_mount_ctx {
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct list_head dfs_ses_list;
};
static inline void __free_dfs_info_param(struct dfs_info3_param *param)
@@ -2105,6 +2109,7 @@ extern struct workqueue_struct *decrypt_wq;
extern struct workqueue_struct *fileinfo_put_wq;
extern struct workqueue_struct *cifsoplockd_wq;
extern struct workqueue_struct *deferredclose_wq;
+extern struct workqueue_struct *serverclose_wq;
extern __u32 cifs_lock_secret;
extern mempool_t *cifs_sm_req_poolp;
@@ -2324,4 +2329,14 @@ struct smb2_compound_vars {
struct kvec ea_iov;
};
+static inline bool cifs_ses_exiting(struct cifs_ses *ses)
+{
+ bool ret;
+
+ spin_lock(&ses->ses_lock);
+ ret = ses->ses_status == SES_EXITING;
+ spin_unlock(&ses->ses_lock);
+ return ret;
+}
+
#endif /* _CIFS_GLOB_H */
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index c0513fbb8a59..c46d418c1c0c 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -882,7 +882,7 @@ typedef struct smb_com_open_rsp {
__u8 OplockLevel;
__u16 Fid;
__le32 CreateAction;
- struct_group(common_attributes,
+ struct_group_attr(common_attributes, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
@@ -2266,7 +2266,7 @@ typedef struct {
/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
/******************************************************************************/
typedef struct { /* data block encoding of response to level 263 QPathInfo */
- struct_group(common_attributes,
+ struct_group_attr(common_attributes, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 0723e1b57256..fbc358c09da3 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -303,7 +303,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
struct TCP_Server_Info *primary_server);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
-extern void cifs_put_tcon(struct cifs_tcon *tcon);
+extern void cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace);
extern void cifs_release_automount_timer(void);
@@ -530,8 +530,9 @@ extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
extern struct cifs_ses *sesInfoAlloc(void);
extern void sesInfoFree(struct cifs_ses *);
-extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled);
-extern void tconInfoFree(struct cifs_tcon *);
+extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled,
+ enum smb3_tcon_ref_trace trace);
+extern void tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace);
extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number);
@@ -721,35 +722,33 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
return options;
}
-struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
-void cifs_put_tcon_super(struct super_block *sb);
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
-/* Put references of @ses and @ses->dfs_root_ses */
+/* Put references of @ses and its children */
static inline void cifs_put_smb_ses(struct cifs_ses *ses)
{
- struct cifs_ses *rses = ses->dfs_root_ses;
+ struct cifs_ses *next;
- __cifs_put_smb_ses(ses);
- if (rses)
- __cifs_put_smb_ses(rses);
+ do {
+ next = ses->dfs_root_ses;
+ __cifs_put_smb_ses(ses);
+ } while ((ses = next));
}
-/* Get an active reference of @ses and @ses->dfs_root_ses.
+/* Get an active reference of @ses and its children.
*
* NOTE: make sure to call this function when incrementing reference count of
* @ses to ensure that any DFS root session attached to it (@ses->dfs_root_ses)
* will also get its reference count incremented.
*
- * cifs_put_smb_ses() will put both references, so call it when you're done.
+ * cifs_put_smb_ses() will put all references, so call it when you're done.
*/
static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
{
lockdep_assert_held(&cifs_tcp_ses_lock);
- ses->ses_count++;
- if (ses->dfs_root_ses)
- ses->dfs_root_ses->ses_count++;
+ for (; ses; ses = ses->dfs_root_ses)
+ ses->ses_count++;
}
static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
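A note on the pairing above: cifs_smb_ses_inc_refcount() takes one reference on @ses and on every ancestor reachable through dfs_root_ses, and cifs_put_smb_ses() walks the same chain dropping one reference per node, so the two stay symmetric for arbitrarily deep DFS chains. A minimal standalone sketch of that walk (simplified stand-in types, not the kernel's struct cifs_ses):

#include <assert.h>
#include <stddef.h>

struct ses {                        /* simplified stand-in for struct cifs_ses */
	int refcount;
	struct ses *dfs_root_ses;   /* parent DFS root session, or NULL */
};

static void chain_get(struct ses *s)
{
	for (; s; s = s->dfs_root_ses)  /* take a ref on s and every ancestor */
		s->refcount++;
}

static void chain_put(struct ses *s)
{
	struct ses *next;

	do {                            /* drop one ref per node, walking up */
		next = s->dfs_root_ses;
		s->refcount--;
	} while ((s = next));
}

int main(void)
{
	struct ses root = { .refcount = 1 };
	struct ses child = { .refcount = 1, .dfs_root_ses = &root };

	chain_get(&child);              /* child=2, root=2 */
	chain_put(&child);              /* back to child=1, root=1 */
	assert(child.refcount == 1 && root.refcount == 1);
	return 0;
}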
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 5aee55551573..23b5709ddc31 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -5854,10 +5854,8 @@ SetEARetry:
parm_data->list.EA_flags = 0;
/* we checked above that name len is less than 255 */
parm_data->list.name_len = (__u8)name_len;
- /* EA names are always ASCII */
- if (ea_name)
- strncpy(parm_data->list.name, ea_name, name_len);
- parm_data->list.name[name_len] = '\0';
+ /* EA names are always ASCII and NUL-terminated */
+ strscpy(parm_data->list.name, ea_name ?: "", name_len + 1);
parm_data->list.value_len = cpu_to_le16(ea_value_len);
/* caller ensures that ea_value_len is less than 64K but
we need to ensure that it fits within the smb */
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 9b85b5341822..7a16e12f5da8 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -175,6 +175,8 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
spin_lock(&ses->chan_lock);
for (i = 0; i < ses->chan_count; i++) {
if (!ses->chans[i].server)
@@ -232,7 +234,13 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
- /* check if iface is still active */
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
+ continue;
+ }
+ spin_unlock(&ses->ses_lock);
+
spin_lock(&ses->chan_lock);
if (cifs_ses_get_chan_index(ses, server) ==
CIFS_INVAL_CHAN_INDEX) {
@@ -1860,6 +1868,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
ctx->sectype != ses->sectype)
return 0;
+ if (ctx->dfs_root_ses != ses->dfs_root_ses)
+ return 0;
+
/*
* If an existing session is limited to less channels than
* requested, it should not be reused
@@ -1932,7 +1943,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
}
/* no need to setup directory caching on IPC share, so pass in false */
- tcon = tcon_info_alloc(false);
+ tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc);
if (tcon == NULL)
return -ENOMEM;
@@ -1949,7 +1960,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
if (rc) {
cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
- tconInfoFree(tcon);
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail);
goto out;
}
@@ -1963,31 +1974,6 @@ out:
return rc;
}
-/**
- * cifs_free_ipc - helper to release the session IPC tcon
- * @ses: smb session to unmount the IPC from
- *
- * Needs to be called everytime a session is destroyed.
- *
- * On session close, the IPC is closed and the server must release all tcons of the session.
- * No need to send a tree disconnect here.
- *
- * Besides, it will make the server to not close durable and resilient files on session close, as
- * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
- */
-static int
-cifs_free_ipc(struct cifs_ses *ses)
-{
- struct cifs_tcon *tcon = ses->tcon_ipc;
-
- if (tcon == NULL)
- return 0;
-
- tconInfoFree(tcon);
- ses->tcon_ipc = NULL;
- return 0;
-}
-
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
@@ -2019,48 +2005,52 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
void __cifs_put_smb_ses(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
+ struct cifs_tcon *tcon;
unsigned int xid;
size_t i;
+ bool do_logoff;
int rc;
+ spin_lock(&cifs_tcp_ses_lock);
spin_lock(&ses->ses_lock);
- if (ses->ses_status == SES_EXITING) {
+ cifs_dbg(FYI, "%s: id=0x%llx ses_count=%d ses_status=%u ipc=%s\n",
+ __func__, ses->Suid, ses->ses_count, ses->ses_status,
+ ses->tcon_ipc ? ses->tcon_ipc->tree_name : "none");
+ if (ses->ses_status == SES_EXITING || --ses->ses_count > 0) {
spin_unlock(&ses->ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return;
}
- spin_unlock(&ses->ses_lock);
+ /* ses_count can never go negative */
+ WARN_ON(ses->ses_count < 0);
- cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
- cifs_dbg(FYI,
- "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
+ spin_lock(&ses->chan_lock);
+ cifs_chan_clear_need_reconnect(ses, server);
+ spin_unlock(&ses->chan_lock);
- spin_lock(&cifs_tcp_ses_lock);
- if (--ses->ses_count > 0) {
- spin_unlock(&cifs_tcp_ses_lock);
- return;
- }
- spin_lock(&ses->ses_lock);
- if (ses->ses_status == SES_GOOD)
- ses->ses_status = SES_EXITING;
+ do_logoff = ses->ses_status == SES_GOOD && server->ops->logoff;
+ ses->ses_status = SES_EXITING;
+ tcon = ses->tcon_ipc;
+ ses->tcon_ipc = NULL;
spin_unlock(&ses->ses_lock);
spin_unlock(&cifs_tcp_ses_lock);
- /* ses_count can never go negative */
- WARN_ON(ses->ses_count < 0);
-
- spin_lock(&ses->ses_lock);
- if (ses->ses_status == SES_EXITING && server->ops->logoff) {
- spin_unlock(&ses->ses_lock);
- cifs_free_ipc(ses);
+ /*
+ * On session close, the IPC is closed and the server must release all
+ * tcons of the session. No need to send a tree disconnect here.
+ *
+ * Besides, it will make the server not close durable and resilient
+ * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an
+ * SMB2 LOGOFF Request.
+ */
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc);
+ if (do_logoff) {
xid = get_xid();
rc = server->ops->logoff(xid, ses);
if (rc)
cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
__func__, rc);
_free_xid(xid);
- } else {
- spin_unlock(&ses->ses_lock);
- cifs_free_ipc(ses);
}
spin_lock(&cifs_tcp_ses_lock);
@@ -2193,6 +2183,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
++delim;
+ /* BB consider adding support for password2 (Key Rotation) for multiuser in future */
ctx->password = kstrndup(delim, len, GFP_KERNEL);
if (!ctx->password) {
cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
@@ -2216,6 +2207,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
kfree(ctx->username);
ctx->username = NULL;
kfree_sensitive(ctx->password);
+ /* no need to free ctx->password2 since not allocated in this path */
ctx->password = NULL;
goto out_key_put;
}
@@ -2327,6 +2319,12 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
if (!ses->password)
goto get_ses_fail;
}
+ /* ctx->password freed at unmount */
+ if (ctx->password2) {
+ ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
+ if (!ses->password2)
+ goto get_ses_fail;
+ }
if (ctx->domainname) {
ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
if (!ses->domainName)
@@ -2373,9 +2371,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
* need to lock before changing something in the session.
*/
spin_lock(&cifs_tcp_ses_lock);
+ if (ctx->dfs_root_ses)
+ cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
ses->dfs_root_ses = ctx->dfs_root_ses;
- if (ses->dfs_root_ses)
- ses->dfs_root_ses->ses_count++;
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2434,6 +2432,8 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
continue;
}
++tcon->tc_count;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_find);
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
return tcon;
@@ -2443,7 +2443,7 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
}
void
-cifs_put_tcon(struct cifs_tcon *tcon)
+cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
unsigned int xid;
struct cifs_ses *ses;
@@ -2459,6 +2459,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&tcon->tc_lock);
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count - 1, trace);
if (--tcon->tc_count > 0) {
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2495,7 +2496,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
_free_xid(xid);
cifs_fscache_release_super_cookie(tcon);
- tconInfoFree(tcon);
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free);
cifs_put_smb_ses(ses);
}
@@ -2549,7 +2550,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
nohandlecache = ctx->nohandlecache;
else
nohandlecache = true;
- tcon = tcon_info_alloc(!nohandlecache);
+ tcon = tcon_info_alloc(!nohandlecache, netfs_trace_tcon_ref_new);
if (tcon == NULL) {
rc = -ENOMEM;
goto out_fail;
@@ -2739,7 +2740,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
return tcon;
out_fail:
- tconInfoFree(tcon);
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free_fail);
return ERR_PTR(rc);
}
@@ -2756,7 +2757,7 @@ cifs_put_tlink(struct tcon_link *tlink)
}
if (!IS_ERR(tlink_tcon(tlink)))
- cifs_put_tcon(tlink_tcon(tlink));
+ cifs_put_tcon(tlink_tcon(tlink), netfs_trace_tcon_ref_put_tlink);
kfree(tlink);
}
@@ -3321,11 +3322,14 @@ void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
int rc = 0;
if (mnt_ctx->tcon)
- cifs_put_tcon(mnt_ctx->tcon);
+ cifs_put_tcon(mnt_ctx->tcon, netfs_trace_tcon_ref_put_mnt_ctx);
else if (mnt_ctx->ses)
cifs_put_smb_ses(mnt_ctx->ses);
else if (mnt_ctx->server)
cifs_put_tcp_session(mnt_ctx->server, 0);
+ mnt_ctx->ses = NULL;
+ mnt_ctx->tcon = NULL;
+ mnt_ctx->server = NULL;
mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
free_xid(mnt_ctx->xid);
}
@@ -3604,8 +3608,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
bool isdfs;
int rc;
- INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
-
rc = dfs_mount_share(&mnt_ctx, &isdfs);
if (rc)
goto error;
@@ -3636,7 +3638,6 @@ out:
return rc;
error:
- dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
cifs_mount_put_conns(&mnt_ctx);
return rc;
}
@@ -3651,6 +3652,18 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
goto error;
rc = cifs_mount_get_tcon(&mnt_ctx);
+ if (!rc) {
+ /*
+ * Prevent superblock from being created with any missing
+ * connections.
+ */
+ if (WARN_ON(!mnt_ctx.server))
+ rc = -EHOSTDOWN;
+ else if (WARN_ON(!mnt_ctx.ses))
+ rc = -EACCES;
+ else if (WARN_ON(!mnt_ctx.tcon))
+ rc = -ENOENT;
+ }
if (rc)
goto error;
@@ -3988,13 +4001,14 @@ cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
static struct cifs_tcon *
-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
int rc;
struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
struct cifs_ses *ses;
struct cifs_tcon *tcon = NULL;
struct smb3_fs_context *ctx;
+ char *origin_fullpath = NULL;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
@@ -4018,6 +4032,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
ctx->sign = master_tcon->ses->sign;
ctx->seal = master_tcon->seal;
ctx->witness = master_tcon->use_witness;
+ ctx->dfs_root_ses = master_tcon->ses->dfs_root_ses;
rc = cifs_set_vol_auth(ctx, master_tcon->ses);
if (rc) {
@@ -4037,12 +4052,39 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
goto out;
}
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ spin_lock(&master_tcon->tc_lock);
+ if (master_tcon->origin_fullpath) {
+ spin_unlock(&master_tcon->tc_lock);
+ origin_fullpath = dfs_get_path(cifs_sb, cifs_sb->ctx->source);
+ if (IS_ERR(origin_fullpath)) {
+ tcon = ERR_CAST(origin_fullpath);
+ origin_fullpath = NULL;
+ cifs_put_smb_ses(ses);
+ goto out;
+ }
+ } else {
+ spin_unlock(&master_tcon->tc_lock);
+ }
+#endif
+
tcon = cifs_get_tcon(ses, ctx);
if (IS_ERR(tcon)) {
cifs_put_smb_ses(ses);
goto out;
}
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ if (origin_fullpath) {
+ spin_lock(&tcon->tc_lock);
+ tcon->origin_fullpath = origin_fullpath;
+ spin_unlock(&tcon->tc_lock);
+ origin_fullpath = NULL;
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ dfs_cache_get_ttl() * HZ);
+ }
+#endif
+
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, ctx);
@@ -4051,11 +4093,23 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
out:
kfree(ctx->username);
kfree_sensitive(ctx->password);
+ kfree(origin_fullpath);
kfree(ctx);
return tcon;
}
+static struct cifs_tcon *
+cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+{
+ struct cifs_tcon *ret;
+
+ cifs_mount_lock();
+ ret = __cifs_construct_tcon(cifs_sb, fsuid);
+ cifs_mount_unlock();
+ return ret;
+}
+
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
index 449c59830039..3ec965547e3d 100644
--- a/fs/smb/client/dfs.c
+++ b/fs/smb/client/dfs.c
@@ -66,33 +66,20 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
}
/*
- * Track individual DFS referral servers used by new DFS mount.
- *
- * On success, their lifetime will be shared by final tcon (dfs_ses_list).
- * Otherwise, they will be put by dfs_put_root_smb_sessions() in cifs_mount().
+ * Get an active reference of @ses so that next call to cifs_put_tcon() won't
+ * release it as any new DFS referrals must go through its IPC tcon.
*/
-static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
- struct dfs_root_ses *root_ses;
struct cifs_ses *ses = mnt_ctx->ses;
if (ses) {
- root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL);
- if (!root_ses)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&root_ses->list);
-
spin_lock(&cifs_tcp_ses_lock);
cifs_smb_ses_inc_refcount(ses);
spin_unlock(&cifs_tcp_ses_lock);
- root_ses->ses = ses;
- list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
}
- /* Select new DFS referral server so that new referrals go through it */
ctx->dfs_root_ses = ses;
- return 0;
}
static inline int parse_dfs_target(struct smb3_fs_context *ctx,
@@ -185,11 +172,8 @@ again:
continue;
}
- if (is_refsrv) {
- rc = add_root_smb_session(mnt_ctx);
- if (rc)
- goto out;
- }
+ if (is_refsrv)
+ add_root_smb_session(mnt_ctx);
rc = ref_walk_advance(rw);
if (!rc) {
@@ -232,6 +216,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_tcon *tcon;
char *origin_fullpath;
+ bool new_tcon = true;
int rc;
origin_fullpath = dfs_get_path(cifs_sb, ctx->source);
@@ -239,6 +224,18 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
return PTR_ERR(origin_fullpath);
rc = dfs_referral_walk(mnt_ctx);
+ if (!rc) {
+ /*
+ * Prevent superblock from being created with any missing
+ * connections.
+ */
+ if (WARN_ON(!mnt_ctx->server))
+ rc = -EHOSTDOWN;
+ else if (WARN_ON(!mnt_ctx->ses))
+ rc = -EACCES;
+ else if (WARN_ON(!mnt_ctx->tcon))
+ rc = -ENOENT;
+ }
if (rc)
goto out;
@@ -247,15 +244,14 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
if (!tcon->origin_fullpath) {
tcon->origin_fullpath = origin_fullpath;
origin_fullpath = NULL;
+ } else {
+ new_tcon = false;
}
spin_unlock(&tcon->tc_lock);
- if (list_empty(&tcon->dfs_ses_list)) {
- list_replace_init(&mnt_ctx->dfs_ses_list, &tcon->dfs_ses_list);
+ if (new_tcon) {
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
dfs_cache_get_ttl() * HZ);
- } else {
- dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
}
out:
@@ -298,7 +294,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
if (rc)
return rc;
- ctx->dfs_root_ses = mnt_ctx->ses;
/*
* If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
* try to get a DFS referral (even cached) to determine whether it is a DFS mount.
@@ -324,7 +319,9 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
*isdfs = true;
add_root_smb_session(mnt_ctx);
- return __dfs_mount_share(mnt_ctx);
+ rc = __dfs_mount_share(mnt_ctx);
+ dfs_put_root_smb_sessions(mnt_ctx);
+ return rc;
}
/* Update dfs referral path of superblock */
diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h
index 875ab7ae57fc..e5c4dcf83750 100644
--- a/fs/smb/client/dfs.h
+++ b/fs/smb/client/dfs.h
@@ -7,7 +7,9 @@
#define _CIFS_DFS_H
#include "cifsglob.h"
+#include "cifsproto.h"
#include "fs_context.h"
+#include "dfs_cache.h"
#include "cifs_unicode.h"
#include <linux/namei.h>
@@ -114,11 +116,6 @@ static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw)
ref_walk_tit(rw));
}
-struct dfs_root_ses {
- struct list_head list;
- struct cifs_ses *ses;
-};
-
int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
struct smb3_fs_context *ctx);
int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
@@ -133,20 +130,32 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct cifs_ses *rses = ctx->dfs_root_ses ?: mnt_ctx->ses;
- return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls,
+ return dfs_cache_find(mnt_ctx->xid, rses, cifs_sb->local_nls,
cifs_remap(cifs_sb), path, ref, tl);
}
-static inline void dfs_put_root_smb_sessions(struct list_head *head)
+/*
+ * cifs_get_smb_ses() already guarantees an active reference of
+ * @ses->dfs_root_ses when a new session is created, so we need to put extra
+ * references of all DFS root sessions that were used across the mount process
+ * in dfs_mount_share().
+ */
+static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx)
{
- struct dfs_root_ses *root, *tmp;
+ const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_ses *ses = ctx->dfs_root_ses;
+ struct cifs_ses *cur;
+
+ if (!ses)
+ return;
- list_for_each_entry_safe(root, tmp, head, list) {
- list_del_init(&root->list);
- cifs_put_smb_ses(root->ses);
- kfree(root);
+ for (cur = ses; cur; cur = cur->dfs_root_ses) {
+ if (cur->dfs_root_ses)
+ cifs_put_smb_ses(cur->dfs_root_ses);
}
+ cifs_put_smb_ses(ses);
}
#endif /* _CIFS_DFS_H */
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 508d831fabe3..11c8efecf7aa 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -1172,8 +1172,8 @@ static bool is_ses_good(struct cifs_ses *ses)
return ret;
}
-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
+/* Refresh dfs referral of @ses and mark it for reconnect if needed */
+static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
{
struct TCP_Server_Info *server = ses->server;
DFS_CACHE_TGT_LIST(old_tl);
@@ -1181,10 +1181,21 @@ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_ref
bool needs_refresh = false;
struct cache_entry *ce;
unsigned int xid;
+ char *path = NULL;
int rc = 0;
xid = get_xid();
+ mutex_lock(&server->refpath_lock);
+ if (server->leaf_fullpath) {
+ path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
+ if (!path)
+ rc = -ENOMEM;
+ }
+ mutex_unlock(&server->refpath_lock);
+ if (!path)
+ goto out;
+
down_read(&htable_rw_lock);
ce = lookup_cache_entry(path);
needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
@@ -1218,19 +1229,17 @@ out:
free_xid(xid);
dfs_cache_free_tgts(&old_tl);
dfs_cache_free_tgts(&new_tl);
- return rc;
+ kfree(path);
}
-static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+static inline void refresh_ses_referral(struct cifs_ses *ses)
{
- struct TCP_Server_Info *server = tcon->ses->server;
- struct cifs_ses *ses = tcon->ses;
+ __refresh_ses_referral(ses, false);
+}
- mutex_lock(&server->refpath_lock);
- if (server->leaf_fullpath)
- __refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
- mutex_unlock(&server->refpath_lock);
- return 0;
+static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+{
+ __refresh_ses_referral(ses, true);
}
/**
@@ -1271,34 +1280,20 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
*/
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
- return refresh_tcon(tcon, true);
+ force_refresh_ses_referral(tcon->ses);
+ return 0;
}
/* Refresh all DFS referrals related to DFS tcon */
void dfs_cache_refresh(struct work_struct *work)
{
- struct TCP_Server_Info *server;
- struct dfs_root_ses *rses;
struct cifs_tcon *tcon;
struct cifs_ses *ses;
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
- ses = tcon->ses;
- server = ses->server;
- mutex_lock(&server->refpath_lock);
- if (server->leaf_fullpath)
- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
- mutex_unlock(&server->refpath_lock);
-
- list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
- ses = rses->ses;
- server = ses->server;
- mutex_lock(&server->refpath_lock);
- if (server->leaf_fullpath)
- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
- mutex_unlock(&server->refpath_lock);
- }
+ for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+ refresh_ses_referral(ses);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
atomic_read(&dfs_cache_ttl) * HZ);
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index 89333d9bce36..864b194dbaa0 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -189,6 +189,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
int disposition;
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
+ int rdwr_for_fscache = 0;
*oplock = 0;
if (tcon->ses->server->oplocks)
@@ -200,6 +201,10 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
return PTR_ERR(full_path);
}
+ /* If we're caching, we need to be able to fill in around partial writes. */
+ if (cifs_fscache_enabled(inode) && (oflags & O_ACCMODE) == O_WRONLY)
+ rdwr_for_fscache = 1;
+
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
@@ -276,6 +281,8 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
desired_access |= GENERIC_READ; /* is this too little? */
if (OPEN_FMODE(oflags) & FMODE_WRITE)
desired_access |= GENERIC_WRITE;
+ if (rdwr_for_fscache == 1)
+ desired_access |= GENERIC_READ;
disposition = FILE_OVERWRITE_IF;
if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
@@ -304,6 +311,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY;
+retry_open:
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
@@ -317,8 +325,15 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
rc = server->ops->open(xid, &oparms, oplock, buf);
if (rc) {
cifs_dbg(FYI, "cifs_create returned 0x%x\n", rc);
+ if (rc == -EACCES && rdwr_for_fscache == 1) {
+ desired_access &= ~GENERIC_READ;
+ rdwr_for_fscache = 2;
+ goto retry_open;
+ }
goto out;
}
+ if (rdwr_for_fscache == 2)
+ cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
@@ -612,11 +627,18 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode,
goto mknod_out;
}
+ trace_smb3_mknod_enter(xid, tcon->ses->Suid, tcon->tid, full_path);
+
rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon,
full_path, mode,
device_number);
mknod_out:
+ if (rc)
+ trace_smb3_mknod_err(xid, tcon->ses->Suid, tcon->tid, rc);
+ else
+ trace_smb3_mknod_done(xid, tcon->ses->Suid, tcon->tid);
+
free_dentry_path(page);
free_xid(xid);
cifs_put_tlink(tlink);
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 16aadce492b2..9be37d0fe724 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -206,12 +206,12 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
*/
}
-static inline int cifs_convert_flags(unsigned int flags)
+static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
if ((flags & O_ACCMODE) == O_RDONLY)
return GENERIC_READ;
else if ((flags & O_ACCMODE) == O_WRONLY)
- return GENERIC_WRITE;
+ return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
else if ((flags & O_ACCMODE) == O_RDWR) {
/* GENERIC_ALL is too much permission to request
can cause unnecessary access denied on create */
@@ -348,11 +348,16 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
int create_options = CREATE_NOT_DIR;
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
+ int rdwr_for_fscache = 0;
if (!server->ops->open)
return -ENOSYS;
- desired_access = cifs_convert_flags(f_flags);
+ /* If we're caching, we need to be able to fill in around partial writes. */
+ if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
+ rdwr_for_fscache = 1;
+
+ desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
/*********************************************************************
* open flag mapping table:
@@ -389,6 +394,7 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
if (f_flags & O_DIRECT)
create_options |= CREATE_NO_BUFFER;
+retry_open:
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
@@ -400,8 +406,16 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
};
rc = server->ops->open(xid, &oparms, oplock, buf);
- if (rc)
+ if (rc) {
+ if (rc == -EACCES && rdwr_for_fscache == 1) {
+ desired_access = cifs_convert_flags(f_flags, 0);
+ rdwr_for_fscache = 2;
+ goto retry_open;
+ }
return rc;
+ }
+ if (rdwr_for_fscache == 2)
+ cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
/* TODO: Add support for calling posix query info but with passing in fid */
if (tcon->unix_ext)
@@ -445,6 +459,7 @@ cifs_down_write(struct rw_semaphore *sem)
}
static void cifsFileInfo_put_work(struct work_struct *work);
+void serverclose_work(struct work_struct *work);
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct tcon_link *tlink, __u32 oplock,
@@ -491,6 +506,7 @@ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->tlink = cifs_get_tlink(tlink);
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
INIT_WORK(&cfile->put, cifsFileInfo_put_work);
+ INIT_WORK(&cfile->serverclose, serverclose_work);
INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
mutex_init(&cfile->fh_mutex);
spin_lock_init(&cfile->file_info_lock);
@@ -582,6 +598,40 @@ static void cifsFileInfo_put_work(struct work_struct *work)
cifsFileInfo_put_final(cifs_file);
}
+void serverclose_work(struct work_struct *work)
+{
+ struct cifsFileInfo *cifs_file = container_of(work,
+ struct cifsFileInfo, serverclose);
+ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
+ struct TCP_Server_Info *server = tcon->ses->server;
+ int rc = 0;
+ int retries = 0;
+ int MAX_RETRIES = 4;
+
+ do {
+ if (server->ops->close_getattr)
+ rc = server->ops->close_getattr(0, tcon, cifs_file);
+ else if (server->ops->close)
+ rc = server->ops->close(0, tcon, &cifs_file->fid);
+
+ if (rc == -EBUSY || rc == -EAGAIN) {
+ retries++;
+ msleep(250);
+ }
+ } while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));
+
+ if (retries == MAX_RETRIES)
+ pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
+
+ if (cifs_file->offload)
+ queue_work(fileinfo_put_wq, &cifs_file->put);
+ else
+ cifsFileInfo_put_final(cifs_file);
+}
+
/**
* cifsFileInfo_put - release a reference of file priv data
*
@@ -622,10 +672,13 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
struct cifs_fid fid = {};
struct cifs_pending_open open;
bool oplock_break_cancelled;
+ bool serverclose_offloaded = false;
spin_lock(&tcon->open_file_lock);
spin_lock(&cifsi->open_file_lock);
spin_lock(&cifs_file->file_info_lock);
+
+ cifs_file->offload = offload;
if (--cifs_file->count > 0) {
spin_unlock(&cifs_file->file_info_lock);
spin_unlock(&cifsi->open_file_lock);
@@ -667,13 +720,20 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
struct TCP_Server_Info *server = tcon->ses->server;
unsigned int xid;
+ int rc = 0;
xid = get_xid();
if (server->ops->close_getattr)
- server->ops->close_getattr(xid, tcon, cifs_file);
+ rc = server->ops->close_getattr(xid, tcon, cifs_file);
else if (server->ops->close)
- server->ops->close(xid, tcon, &cifs_file->fid);
+ rc = server->ops->close(xid, tcon, &cifs_file->fid);
_free_xid(xid);
+
+ if (rc == -EBUSY || rc == -EAGAIN) {
+ /* server close failed, so offload it as an async operation */
+ queue_work(serverclose_wq, &cifs_file->serverclose);
+ serverclose_offloaded = true;
+ }
}
if (oplock_break_cancelled)
@@ -681,10 +741,15 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
cifs_del_pending_open(&open);
- if (offload)
- queue_work(fileinfo_put_wq, &cifs_file->put);
- else
- cifsFileInfo_put_final(cifs_file);
+ /*
+ * If the server close was offloaded to the workqueue (on failure), it
+ * will handle the final put as well; otherwise handle the put here.
+ */
+ if (!serverclose_offloaded) {
+ if (offload)
+ queue_work(fileinfo_put_wq, &cifs_file->put);
+ else
+ cifsFileInfo_put_final(cifs_file);
+ }
}
int cifs_open(struct inode *inode, struct file *file)
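The retry loop in serverclose_work() above bounds the -EBUSY/-EAGAIN retries at four attempts with a 250 ms sleep in between, then falls through to the final put either way. A hedged standalone sketch of the same bounded-retry shape (hypothetical try_close() standing in for server->ops->close(); not the kernel API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical close operation; stands in for server->ops->close(). */
static int try_close(int handle)
{
	(void)handle;
	return -EBUSY;                  /* pretend the server is always busy */
}

/* Bounded retry, as in serverclose_work(): give up after max_retries. */
static bool close_with_retries(int handle, int max_retries)
{
	int rc, retries = 0;

	do {
		rc = try_close(handle);
		if (rc == -EBUSY || rc == -EAGAIN)
			retries++;      /* the kernel code also sleeps here */
	} while ((rc == -EBUSY || rc == -EAGAIN) && retries < max_retries);

	if (retries == max_retries) {
		fprintf(stderr, "close failed %d times, giving up\n", max_retries);
		return false;
	}
	return rc == 0;
}

int main(void)
{
	close_with_retries(42, 4);
	return 0;
}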
@@ -834,11 +899,11 @@ int cifs_open(struct inode *inode, struct file *file)
use_cache:
fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
file->f_mode & FMODE_WRITE);
- if (file->f_flags & O_DIRECT &&
- (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
- file->f_flags & O_APPEND))
- cifs_invalidate_cache(file_inode(file),
- FSCACHE_INVAL_DIO_WRITE);
+ if (!(file->f_flags & O_DIRECT))
+ goto out;
+ if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
+ goto out;
+ cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
out:
free_dentry_path(page);
@@ -903,6 +968,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
int disposition = FILE_OPEN;
int create_options = CREATE_NOT_DIR;
struct cifs_open_parms oparms;
+ int rdwr_for_fscache = 0;
xid = get_xid();
mutex_lock(&cfile->fh_mutex);
@@ -966,7 +1032,11 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
- desired_access = cifs_convert_flags(cfile->f_flags);
+ /* If we're caching, we need to be able to fill in around partial writes. */
+ if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
+ rdwr_for_fscache = 1;
+
+ desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
/* O_SYNC also has bit for O_DSYNC so following check picks up either */
if (cfile->f_flags & O_SYNC)
@@ -978,6 +1048,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &cfile->fid);
+retry_open:
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
@@ -1003,6 +1074,11 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
/* indicate that we need to relock the file */
oparms.reconnect = true;
}
+ if (rc == -EACCES && rdwr_for_fscache == 1) {
+ desired_access = cifs_convert_flags(cfile->f_flags, 0);
+ rdwr_for_fscache = 2;
+ goto retry_open;
+ }
if (rc) {
mutex_unlock(&cfile->fh_mutex);
@@ -1011,6 +1087,9 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
goto reopen_error_exit;
}
+ if (rdwr_for_fscache == 2)
+ cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
+
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
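The rdwr_for_fscache variable threaded through cifs_do_create(), cifs_nt_open() and cifs_reopen_file() above is a small three-state flag: 0 = no widening, 1 = GENERIC_READ added speculatively so fscache can read back around partial writes, 2 = the server rejected the widened access and the open was retried write-only, after which the local cache is invalidated. A condensed sketch of that control flow (hypothetical server_open() helper, not the kernel API):

#include <errno.h>
#include <stdio.h>

#define GENERIC_READ  0x1
#define GENERIC_WRITE 0x2

/* Hypothetical server open: refuses read access in this example. */
static int server_open(unsigned access)
{
	return (access & GENERIC_READ) ? -EACCES : 0;
}

static int open_write_only(int caching_enabled)
{
	int rdwr_for_fscache = caching_enabled ? 1 : 0;
	unsigned access = GENERIC_WRITE;
	int rc;

	if (rdwr_for_fscache == 1)
		access |= GENERIC_READ;  /* widen so the cache can read back */
retry:
	rc = server_open(access);
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		access &= ~GENERIC_READ; /* fall back to plain write-only */
		rdwr_for_fscache = 2;
		goto retry;
	}
	if (rc)
		return rc;
	if (rdwr_for_fscache == 2)
		printf("invalidate local cache: no read-back possible\n");
	return 0;
}

int main(void)
{
	return open_write_only(1);
}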
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index bdcbe6ff2739..3bbac925d076 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -37,7 +37,7 @@
#include "rfc1002pdu.h"
#include "fs_context.h"
-static DEFINE_MUTEX(cifs_mount_mutex);
+DEFINE_MUTEX(cifs_mount_mutex);
static const match_table_t cifs_smb_version_tokens = {
{ Smb_1, SMB1_VERSION_STRING },
@@ -162,6 +162,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_string("username", Opt_user),
fsparam_string("pass", Opt_pass),
fsparam_string("password", Opt_pass),
+ fsparam_string("password2", Opt_pass2),
fsparam_string("ip", Opt_ip),
fsparam_string("addr", Opt_ip),
fsparam_string("domain", Opt_domain),
@@ -345,6 +346,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
new_ctx->nodename = NULL;
new_ctx->username = NULL;
new_ctx->password = NULL;
+ new_ctx->password2 = NULL;
new_ctx->server_hostname = NULL;
new_ctx->domainname = NULL;
new_ctx->UNC = NULL;
@@ -357,6 +359,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
DUP_CTX_STR(prepath);
DUP_CTX_STR(username);
DUP_CTX_STR(password);
+ DUP_CTX_STR(password2);
DUP_CTX_STR(server_hostname);
DUP_CTX_STR(UNC);
DUP_CTX_STR(source);
@@ -745,6 +748,16 @@ static int smb3_fs_context_validate(struct fs_context *fc)
/* set the port that we got earlier */
cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port);
+ if (ctx->uid_specified && !ctx->forceuid_specified) {
+ ctx->override_uid = 1;
+ pr_notice("enabling forceuid mount option implicitly because uid= option is specified\n");
+ }
+
+ if (ctx->gid_specified && !ctx->forcegid_specified) {
+ ctx->override_gid = 1;
+ pr_notice("enabling forcegid mount option implicitly because gid= option is specified\n");
+ }
+
if (ctx->override_uid && !ctx->uid_specified) {
ctx->override_uid = 0;
pr_notice("ignoring forceuid mount option specified with no uid= option\n");
@@ -783,9 +796,9 @@ static int smb3_get_tree(struct fs_context *fc)
if (err)
return err;
- mutex_lock(&cifs_mount_mutex);
+ cifs_mount_lock();
ret = smb3_get_tree_common(fc);
- mutex_unlock(&cifs_mount_mutex);
+ cifs_mount_unlock();
return ret;
}
@@ -905,6 +918,8 @@ static int smb3_reconfigure(struct fs_context *fc)
else {
kfree_sensitive(ses->password);
ses->password = kstrdup(ctx->password, GFP_KERNEL);
+ kfree_sensitive(ses->password2);
+ ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
}
STEAL_STRING(cifs_sb, ctx, domainname);
STEAL_STRING(cifs_sb, ctx, nodename);
@@ -1014,12 +1029,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->override_uid = 0;
else
ctx->override_uid = 1;
+ ctx->forceuid_specified = true;
break;
case Opt_forcegid:
if (result.negated)
ctx->override_gid = 0;
else
ctx->override_gid = 1;
+ ctx->forcegid_specified = true;
break;
case Opt_perm:
if (result.negated)
@@ -1305,6 +1322,18 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
goto cifs_parse_mount_err;
}
break;
+ case Opt_pass2:
+ kfree_sensitive(ctx->password2);
+ ctx->password2 = NULL;
+ if (strlen(param->string) == 0)
+ break;
+
+ ctx->password2 = kstrdup(param->string, GFP_KERNEL);
+ if (ctx->password2 == NULL) {
+ cifs_errorf(fc, "OOM when copying password2 string\n");
+ goto cifs_parse_mount_err;
+ }
+ break;
case Opt_ip:
if (strlen(param->string) == 0) {
ctx->got_ip = false;
@@ -1608,6 +1637,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
cifs_parse_mount_err:
kfree_sensitive(ctx->password);
ctx->password = NULL;
+ kfree_sensitive(ctx->password2);
+ ctx->password2 = NULL;
return -EINVAL;
}
@@ -1713,6 +1744,8 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
ctx->username = NULL;
kfree_sensitive(ctx->password);
ctx->password = NULL;
+ kfree_sensitive(ctx->password2);
+ ctx->password2 = NULL;
kfree(ctx->server_hostname);
ctx->server_hostname = NULL;
kfree(ctx->UNC);
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index 7863f2248c4d..cf577ec0dd0a 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -145,6 +145,7 @@ enum cifs_param {
Opt_source,
Opt_user,
Opt_pass,
+ Opt_pass2,
Opt_ip,
Opt_domain,
Opt_srcaddr,
@@ -164,6 +165,8 @@ enum cifs_param {
};
struct smb3_fs_context {
+ bool forceuid_specified;
+ bool forcegid_specified;
bool uid_specified;
bool cruid_specified;
bool gid_specified;
@@ -177,6 +180,7 @@ struct smb3_fs_context {
char *username;
char *password;
+ char *password2;
char *domainname;
char *source;
char *server_hostname;
@@ -304,4 +308,16 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
#define MAX_CACHED_FIDS 16
extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+extern struct mutex cifs_mount_mutex;
+
+static inline void cifs_mount_lock(void)
+{
+ mutex_lock(&cifs_mount_mutex);
+}
+
+static inline void cifs_mount_unlock(void)
+{
+ mutex_unlock(&cifs_mount_mutex);
+}
+
#endif
diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
index c4a3cb736881..1a895e6243ee 100644
--- a/fs/smb/client/fscache.c
+++ b/fs/smb/client/fscache.c
@@ -12,6 +12,16 @@
#include "cifs_fs_sb.h"
#include "cifsproto.h"
+/*
+ * Key for fscache inode. [!] Contents must match comparisons in cifs_find_inode().
+ */
+struct cifs_fscache_inode_key {
+
+ __le64 uniqueid; /* server inode number */
+ __le64 createtime; /* creation time on server */
+ u8 type; /* S_IFMT file type */
+} __packed;
+
static void cifs_fscache_fill_volume_coherency(
struct cifs_tcon *tcon,
struct cifs_fscache_volume_coherency_data *cd)
@@ -33,12 +43,23 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
char *key;
int ret = -ENOMEM;
+ if (tcon->fscache_acquired)
+ return 0;
+
+ mutex_lock(&tcon->fscache_lock);
+ if (tcon->fscache_acquired) {
+ mutex_unlock(&tcon->fscache_lock);
+ return 0;
+ }
+ tcon->fscache_acquired = true;
+
tcon->fscache = NULL;
switch (sa->sa_family) {
case AF_INET:
case AF_INET6:
break;
default:
+ mutex_unlock(&tcon->fscache_lock);
cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
return -EINVAL;
}
@@ -47,6 +68,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
sharename = extract_sharename(tcon->tree_name);
if (IS_ERR(sharename)) {
+ mutex_unlock(&tcon->fscache_lock);
cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
return PTR_ERR(sharename);
}
@@ -72,6 +94,11 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
}
pr_err("Cache volume key already in use (%s)\n", key);
vcookie = NULL;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_fscache_collision);
+ } else {
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_fscache_okay);
}
tcon->fscache = vcookie;
@@ -80,6 +107,7 @@ out_2:
kfree(key);
out:
kfree(sharename);
+ mutex_unlock(&tcon->fscache_lock);
return ret;
}
@@ -92,20 +120,26 @@ void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
cifs_fscache_fill_volume_coherency(tcon, &cd);
fscache_relinquish_volume(tcon->fscache, &cd, false);
tcon->fscache = NULL;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_fscache_relinq);
}
void cifs_fscache_get_inode_cookie(struct inode *inode)
{
struct cifs_fscache_inode_coherency_data cd;
+ struct cifs_fscache_inode_key key;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ key.uniqueid = cpu_to_le64(cifsi->uniqueid);
+ key.createtime = cpu_to_le64(cifsi->createtime);
+ key.type = (inode->i_mode & S_IFMT) >> 12;
cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd);
cifsi->netfs.cache =
fscache_acquire_cookie(tcon->fscache, 0,
- &cifsi->uniqueid, sizeof(cifsi->uniqueid),
+ &key, sizeof(key),
&cd, sizeof(cd),
i_size_read(&cifsi->netfs.inode));
if (cifsi->netfs.cache)
diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h
index a3d73720914f..1f2ea9f5cc9a 100644
--- a/fs/smb/client/fscache.h
+++ b/fs/smb/client/fscache.h
@@ -109,6 +109,11 @@ static inline void cifs_readahead_to_fscache(struct inode *inode,
__cifs_readahead_to_fscache(inode, pos, len);
}
+static inline bool cifs_fscache_enabled(struct inode *inode)
+{
+ return fscache_cookie_enabled(cifs_inode_cookie(inode));
+}
+
#else /* CONFIG_CIFS_FSCACHE */
static inline
void cifs_fscache_fill_coherency(struct inode *inode,
@@ -124,6 +129,7 @@ static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) {}
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
+static inline bool cifs_fscache_enabled(struct inode *inode) { return false; }
static inline int cifs_fscache_query_occupancy(struct inode *inode,
pgoff_t first, unsigned int nr_pages,
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index d28ab0af6049..60afab5c83d4 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -1105,7 +1105,8 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
} else {
cifs_open_info_to_fattr(fattr, data, sb);
}
- if (!rc && fattr->cf_flags & CIFS_FATTR_DELETE_PENDING)
+ if (!rc && *inode &&
+ (fattr->cf_flags & CIFS_FATTR_DELETE_PENDING))
cifs_mark_open_handles_for_deleted_file(*inode, full_path);
break;
case -EREMOTE:
@@ -1351,6 +1352,8 @@ cifs_find_inode(struct inode *inode, void *opaque)
{
struct cifs_fattr *fattr = opaque;
+ /* [!] The compared values must be the same in struct cifs_fscache_inode_key. */
+
/* don't match inode with different uniqueid */
if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
return 0;
diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
index c012dfdba80d..855ac5a62edf 100644
--- a/fs/smb/client/ioctl.c
+++ b/fs/smb/client/ioctl.c
@@ -247,7 +247,9 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
- if (ses_it->Suid == out.session_id) {
+ spin_lock(&ses_it->ses_lock);
+ if (ses_it->ses_status != SES_EXITING &&
+ ses_it->Suid == out.session_id) {
ses = ses_it;
/*
* since we are using the session outside the crit
@@ -255,9 +257,11 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug
* so increment its refcount
*/
cifs_smb_ses_inc_refcount(ses);
+ spin_unlock(&ses_it->ses_lock);
found = true;
goto search_end;
}
+ spin_unlock(&ses_it->ses_lock);
}
}
search_end:
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index c3771fc81328..07c468ddb88a 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -98,6 +98,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree(buf_to_free->serverDomain);
kfree(buf_to_free->serverNOS);
kfree_sensitive(buf_to_free->password);
+ kfree_sensitive(buf_to_free->password2);
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
kfree_sensitive(buf_to_free->auth_key.response);
@@ -110,9 +111,10 @@ sesInfoFree(struct cifs_ses *buf_to_free)
}
struct cifs_tcon *
-tcon_info_alloc(bool dir_leases_enabled)
+tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
{
struct cifs_tcon *ret_buf;
+ static atomic_t tcon_debug_id;
ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
if (!ret_buf)
@@ -129,7 +131,8 @@ tcon_info_alloc(bool dir_leases_enabled)
atomic_inc(&tconInfoAllocCount);
ret_buf->status = TID_NEW;
- ++ret_buf->tc_count;
+ ret_buf->debug_id = atomic_inc_return(&tcon_debug_id);
+ ret_buf->tc_count = 1;
spin_lock_init(&ret_buf->tc_lock);
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
@@ -138,27 +141,26 @@ tcon_info_alloc(bool dir_leases_enabled)
atomic_set(&ret_buf->num_local_opens, 0);
atomic_set(&ret_buf->num_remote_opens, 0);
ret_buf->stats_from_time = ktime_get_real_seconds();
-#ifdef CONFIG_CIFS_DFS_UPCALL
- INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+#ifdef CONFIG_CIFS_FSCACHE
+ mutex_init(&ret_buf->fscache_lock);
#endif
+ trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
return ret_buf;
}
void
-tconInfoFree(struct cifs_tcon *tcon)
+tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
if (tcon == NULL) {
cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
return;
}
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, trace);
free_cached_dirs(tcon->cfids);
atomic_dec(&tconInfoAllocCount);
kfree(tcon->nativeFileSystem);
kfree_sensitive(tcon->password);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
-#endif
kfree(tcon->origin_fullpath);
kfree(tcon);
}
@@ -487,6 +489,8 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->tid != buf->Tid)
continue;
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index a9eaba8083b0..212ec6f66ec6 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -753,11 +753,11 @@ cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
}
-static void
+static int
cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *fid)
{
- CIFSSMBClose(xid, tcon, fid->netfid);
+ return CIFSSMBClose(xid, tcon, fid->netfid);
}
static int
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index 82b84a4941dd..677ef6f99a5b 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -622,6 +622,8 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock);
cifs_stats_inc(
@@ -697,6 +699,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock);
@@ -763,7 +767,7 @@ smb2_cancelled_close_fid(struct work_struct *work)
if (rc)
cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);
- cifs_put_tcon(tcon);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close_fid);
kfree(cancelled);
}
@@ -807,6 +811,8 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
if (tcon->tc_count <= 0) {
struct TCP_Server_Info *server = NULL;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_cancelled_close);
WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
spin_unlock(&cifs_tcp_ses_lock);
@@ -819,12 +825,14 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
return 0;
}
tcon->tc_count++;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_cancelled_close);
spin_unlock(&cifs_tcp_ses_lock);
rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
persistent_fid, volatile_fid);
if (rc)
- cifs_put_tcon(tcon);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close);
return rc;
}
@@ -852,7 +860,7 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
rsp->PersistentFileId,
rsp->VolatileFileId);
if (rc)
- cifs_put_tcon(tcon);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_mid);
return rc;
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 2ed456948f34..28f0b7d19d53 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -1412,14 +1412,14 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
-static void
+static int
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *fid)
{
- SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+ return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
-static void
+static int
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *cfile)
{
@@ -1430,7 +1430,7 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, &file_inf);
if (rc)
- return;
+ return rc;
inode = d_inode(cfile->dentry);
@@ -1459,6 +1459,7 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
/* End of file and Attributes should not have to be updated on close */
spin_unlock(&inode->i_lock);
+ return rc;
}
static int
@@ -2480,6 +2481,8 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
spin_lock(&tcon->tc_lock);
@@ -2912,8 +2915,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
tcon = list_first_entry_or_null(&ses->tcon_list,
struct cifs_tcon,
tcon_list);
- if (tcon)
+ if (tcon) {
tcon->tc_count++;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_dfs_refer);
+ }
spin_unlock(&cifs_tcp_ses_lock);
}
@@ -2977,6 +2983,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
/* ipc tcons are not refcounted */
spin_lock(&cifs_tcp_ses_lock);
tcon->tc_count--;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_dec_dfs_refer);
/* tc_count can never go negative */
WARN_ON(tcon->tc_count < 0);
spin_unlock(&cifs_tcp_ses_lock);
@@ -3913,7 +3921,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
strcat(message, "W");
}
if (!new_oplock)
- strncpy(message, "None", sizeof(message));
+ strscpy(message, "None");
cinode->oplock = new_oplock;
cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
@@ -4961,68 +4969,84 @@ static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
return 0;
}
-int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
- struct dentry *dentry, struct cifs_tcon *tcon,
- const char *full_path, umode_t mode, dev_t dev)
+static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path, umode_t mode, dev_t dev)
{
- struct cifs_open_info_data buf = {};
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
struct cifs_io_parms io_parms = {};
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_fid fid;
unsigned int bytes_written;
- struct win_dev *pdev;
+ struct win_dev pdev = {};
struct kvec iov[2];
__u32 oplock = server->oplocks ? REQ_OPLOCK : 0;
int rc;
- if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
+ switch (mode & S_IFMT) {
+ case S_IFCHR:
+ strscpy(pdev.type, "IntxCHR");
+ pdev.major = cpu_to_le64(MAJOR(dev));
+ pdev.minor = cpu_to_le64(MINOR(dev));
+ break;
+ case S_IFBLK:
+ strscpy(pdev.type, "IntxBLK");
+ pdev.major = cpu_to_le64(MAJOR(dev));
+ pdev.minor = cpu_to_le64(MINOR(dev));
+ break;
+ case S_IFIFO:
+ strscpy(pdev.type, "LnxFIFO");
+ break;
+ default:
return -EPERM;
+ }
- oparms = (struct cifs_open_parms) {
- .tcon = tcon,
- .cifs_sb = cifs_sb,
- .desired_access = GENERIC_WRITE,
- .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
- CREATE_OPTION_SPECIAL),
- .disposition = FILE_CREATE,
- .path = full_path,
- .fid = &fid,
- };
+ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE,
+ FILE_CREATE, CREATE_NOT_DIR |
+ CREATE_OPTION_SPECIAL, ACL_NO_MODE);
+ oparms.fid = &fid;
- rc = server->ops->open(xid, &oparms, &oplock, &buf);
+ rc = server->ops->open(xid, &oparms, &oplock, NULL);
if (rc)
return rc;
- /*
- * BB Do not bother to decode buf since no local inode yet to put
- * timestamps in, but we can reuse it safely.
- */
- pdev = (struct win_dev *)&buf.fi;
io_parms.pid = current->tgid;
io_parms.tcon = tcon;
- io_parms.length = sizeof(*pdev);
- iov[1].iov_base = pdev;
- iov[1].iov_len = sizeof(*pdev);
- if (S_ISCHR(mode)) {
- memcpy(pdev->type, "IntxCHR", 8);
- pdev->major = cpu_to_le64(MAJOR(dev));
- pdev->minor = cpu_to_le64(MINOR(dev));
- } else if (S_ISBLK(mode)) {
- memcpy(pdev->type, "IntxBLK", 8);
- pdev->major = cpu_to_le64(MAJOR(dev));
- pdev->minor = cpu_to_le64(MINOR(dev));
- } else if (S_ISFIFO(mode)) {
- memcpy(pdev->type, "LnxFIFO", 8);
- }
+ io_parms.length = sizeof(pdev);
+ iov[1].iov_base = &pdev;
+ iov[1].iov_len = sizeof(pdev);
rc = server->ops->sync_write(xid, &fid, &io_parms,
&bytes_written, iov, 1);
server->ops->close(xid, tcon, &fid);
- d_drop(dentry);
- /* FIXME: add code here to set EAs */
- cifs_free_open_info(&buf);
+ return rc;
+}
+
+int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path, umode_t mode, dev_t dev)
+{
+ struct inode *new = NULL;
+ int rc;
+
+ rc = __cifs_sfu_make_node(xid, inode, dentry, tcon,
+ full_path, mode, dev);
+ if (rc)
+ return rc;
+
+ if (tcon->posix_extensions) {
+ rc = smb311_posix_get_inode_info(&new, full_path, NULL,
+ inode->i_sb, xid);
+ } else if (tcon->unix_ext) {
+ rc = cifs_get_inode_info_unix(&new, full_path,
+ inode->i_sb, xid);
+ } else {
+ rc = cifs_get_inode_info(&new, full_path, NULL,
+ inode->i_sb, xid, NULL);
+ }
+ if (!rc)
+ d_instantiate(dentry, new);
return rc;
}
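For reference, the pdev built above follows the SFU convention: an 8-byte ASCII tag ("IntxCHR", "IntxBLK" or "LnxFIFO") followed by two little-endian 64-bit device-number fields, which stay zero for FIFOs. A hedged userspace sketch of assembling such a blob (struct sfu_dev is an illustrative mirror of struct win_dev, and the sketch assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative mirror of the on-wire blob: 8-byte tag + two u64s. */
struct sfu_dev {
	char type[8];
	uint64_t major;                 /* little-endian on the wire */
	uint64_t minor;
} __attribute__((packed));

static int fill_sfu_dev(struct sfu_dev *d, char kind, unsigned maj, unsigned min)
{
	memset(d, 0, sizeof(*d));
	switch (kind) {
	case 'c': memcpy(d->type, "IntxCHR", 8); break;
	case 'b': memcpy(d->type, "IntxBLK", 8); break;
	case 'p': memcpy(d->type, "LnxFIFO", 8); return 0; /* no dev numbers */
	default:  return -1;
	}
	d->major = maj;                 /* assumes little-endian host here */
	d->minor = min;
	return 0;
}

int main(void)
{
	struct sfu_dev d;

	fill_sfu_dev(&d, 'c', 4, 64);
	printf("%.7s %llu:%llu\n", d.type,
	       (unsigned long long)d.major, (unsigned long long)d.minor);
	return 0;
}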
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 3ea688558e6c..a5efce03cb58 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -367,6 +367,17 @@ again:
}
rc = cifs_setup_session(0, ses, server, nls_codepage);
+ if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
+ /*
+ * Try the alternate password for the next reconnect (e.g. key
+ * rotation could be enabled on the server) if one is available
+ * and the current password is expired, but do not swap on
+ * non-password-related errors like host down
+ */
+ if (ses->password2)
+ swap(ses->password2, ses->password);
+ }
+
if ((rc == -EACCES) && !tcon->retry) {
mutex_unlock(&ses->session_mutex);
rc = -EHOSTDOWN;
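The swap() above gives a cheap alternate-credential retry: on an authentication-class error the primary and alternate passwords trade places, so the next session-setup attempt uses the other one; if that one later expires too, the same swap restores the original. A compact illustration (hypothetical authenticate() callback, not the kernel code path):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define SWAP(a, b) do { const char *t = (a); (a) = (b); (b) = t; } while (0)

/* Hypothetical auth check: only "new-secret" is accepted. */
static int authenticate(const char *password)
{
	return strcmp(password, "new-secret") ? -EACCES : 0;
}

int main(void)
{
	const char *password  = "old-secret";  /* current, possibly expired */
	const char *password2 = "new-secret";  /* rotated alternate */
	int rc = authenticate(password);

	/* Swap only on auth errors, as the reconnect path does. */
	if (rc == -EACCES && password2) {
		SWAP(password, password2);
		rc = authenticate(password);    /* retry with the alternate */
	}
	printf("rc=%d using %s\n", rc, password);
	return rc;
}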
@@ -3628,9 +3639,9 @@ replay_again:
memcpy(&pbuf->network_open_info,
&rsp->network_open_info,
sizeof(pbuf->network_open_info));
+ atomic_dec(&tcon->num_remote_opens);
}
- atomic_dec(&tcon->num_remote_opens);
close_exit:
SMB2_close_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
@@ -4127,6 +4138,8 @@ void smb2_reconnect_server(struct work_struct *work)
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->need_reconnect || tcon->need_reopen_files) {
tcon->tc_count++;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_reconnect_server);
list_add_tail(&tcon->rlist, &tmp_list);
tcon_selected = true;
}
@@ -4165,14 +4178,14 @@ void smb2_reconnect_server(struct work_struct *work)
if (tcon->ipc)
cifs_put_smb_ses(tcon->ses);
else
- cifs_put_tcon(tcon);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server);
}
if (!ses_exist)
goto done;
/* allocate a dummy tcon struct used for reconnect */
- tcon = tcon_info_alloc(false);
+ tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server);
if (!tcon) {
resched = true;
list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
@@ -4195,7 +4208,7 @@ void smb2_reconnect_server(struct work_struct *work)
list_del_init(&ses->rlist);
cifs_put_smb_ses(ses);
}
- tconInfoFree(tcon);
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server);
done:
cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
index c72a3b2886b7..2fccf0d4f53d 100644
--- a/fs/smb/client/smb2pdu.h
+++ b/fs/smb/client/smb2pdu.h
@@ -320,7 +320,7 @@ struct smb2_file_reparse_point_info {
} __packed;
struct smb2_file_network_open_info {
- struct_group(network_open_info,
+ struct_group_attr(network_open_info, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 5a3ca62d2f07..02135a605305 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -189,6 +189,8 @@ smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
if (tcon->tid != tid)
continue;
++tcon->tc_count;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_find_sess_tcon);
return tcon;
}
@@ -659,7 +661,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
}
spin_unlock(&server->srv_lock);
if (!is_binding && !server->session_estab) {
- strncpy(shdr->Signature, "BSRSPYL", 8);
+ strscpy(shdr->Signature, "BSRSPYL");
return 0;
}
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index f9c1fd32d0b8..604e52876cd2 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -3,6 +3,9 @@
* Copyright (C) 2018, Microsoft Corporation.
*
* Author(s): Steve French <stfrench@microsoft.com>
+ *
+ * Please use this 3-part article as a reference for writing new tracepoints:
+ * https://lwn.net/Articles/379903/
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cifs
@@ -15,9 +18,70 @@
#include <linux/inet.h>
/*
- * Please use this 3-part article as a reference for writing new tracepoints:
- * https://lwn.net/Articles/379903/
+ * Specify enums for tracing information.
*/
+#define smb3_tcon_ref_traces \
+ EM(netfs_trace_tcon_ref_dec_dfs_refer, "DEC DfsRef") \
+ EM(netfs_trace_tcon_ref_free, "FRE ") \
+ EM(netfs_trace_tcon_ref_free_fail, "FRE Fail ") \
+ EM(netfs_trace_tcon_ref_free_ipc, "FRE Ipc ") \
+ EM(netfs_trace_tcon_ref_free_ipc_fail, "FRE Ipc-F ") \
+ EM(netfs_trace_tcon_ref_free_reconnect_server, "FRE Reconn") \
+ EM(netfs_trace_tcon_ref_get_cancelled_close, "GET Cn-Cls") \
+ EM(netfs_trace_tcon_ref_get_dfs_refer, "GET DfsRef") \
+ EM(netfs_trace_tcon_ref_get_find, "GET Find ") \
+ EM(netfs_trace_tcon_ref_get_find_sess_tcon, "GET FndSes") \
+ EM(netfs_trace_tcon_ref_get_reconnect_server, "GET Reconn") \
+ EM(netfs_trace_tcon_ref_new, "NEW ") \
+ EM(netfs_trace_tcon_ref_new_ipc, "NEW Ipc ") \
+ EM(netfs_trace_tcon_ref_new_reconnect_server, "NEW Reconn") \
+ EM(netfs_trace_tcon_ref_put_cancelled_close, "PUT Cn-Cls") \
+ EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
+ EM(netfs_trace_tcon_ref_put_cancelled_mid, "PUT Cn-Mid") \
+ EM(netfs_trace_tcon_ref_put_mnt_ctx, "PUT MntCtx") \
+ EM(netfs_trace_tcon_ref_put_reconnect_server, "PUT Reconn") \
+ EM(netfs_trace_tcon_ref_put_tlink, "PUT Tlink ") \
+ EM(netfs_trace_tcon_ref_see_cancelled_close, "SEE Cn-Cls") \
+ EM(netfs_trace_tcon_ref_see_fscache_collision, "SEE FV-CO!") \
+ EM(netfs_trace_tcon_ref_see_fscache_okay, "SEE FV-Ok ") \
+ EM(netfs_trace_tcon_ref_see_fscache_relinq, "SEE FV-Rlq") \
+ E_(netfs_trace_tcon_ref_see_umount, "SEE Umount")
+
+#undef EM
+#undef E_
+
+/*
+ * Define those tracing enums.
+ */
+#ifndef __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum smb3_tcon_ref_trace { smb3_tcon_ref_traces } __mode(byte);
+
+#undef EM
+#undef E_
+#endif
+
+/*
+ * Export enum symbols to userspace.
+ */
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+smb3_tcon_ref_traces;
+
+#undef EM
+#undef E_
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
/* For logging errors in read or write */
DECLARE_EVENT_CLASS(smb3_rw_err_class,
@@ -375,6 +439,7 @@ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(get_reparse_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mknod_enter);
DECLARE_EVENT_CLASS(smb3_inf_compound_done_class,
TP_PROTO(unsigned int xid,
@@ -415,7 +480,7 @@ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_wsl_ea_compound_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(tdis_done);
-
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mknod_done);
DECLARE_EVENT_CLASS(smb3_inf_compound_err_class,
TP_PROTO(unsigned int xid,
@@ -461,6 +526,7 @@ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_wsl_ea_compound_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(tdis_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mknod_err);
/*
* For logging SMB3 Status code and Command for responses which return errors
@@ -1123,6 +1189,30 @@ DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
DEFINE_SMB3_CREDIT_EVENT(set_credits);
+
+TRACE_EVENT(smb3_tcon_ref,
+ TP_PROTO(unsigned int tcon_debug_id, int ref,
+ enum smb3_tcon_ref_trace trace),
+ TP_ARGS(tcon_debug_id, ref, trace),
+ TP_STRUCT__entry(
+ __field(unsigned int, tcon)
+ __field(int, ref)
+ __field(enum smb3_tcon_ref_trace, trace)
+ ),
+ TP_fast_assign(
+ __entry->tcon = tcon_debug_id;
+ __entry->ref = ref;
+ __entry->trace = trace;
+ ),
+ TP_printk("TC=%08x %s r=%u",
+ __entry->tcon,
+ __print_symbolic(__entry->trace, smb3_tcon_ref_traces),
+ __entry->ref)
+ );
+
+
+#undef EM
+#undef E_
#endif /* _CIFS_TRACE_H */
#undef TRACE_INCLUDE_PATH
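The EM()/E_() machinery added to trace.h above is the classic X-macro pattern: one master list is expanded several times, first into enum members, then into TRACE_DEFINE_ENUM() exports, then into the symbol-to-string pairs consumed by __print_symbolic(). A self-contained illustration of the technique (names are invented):

#include <stdio.h>

#define COLOUR_LIST \
	EM(COLOUR_RED,   "red")   \
	EM(COLOUR_GREEN, "green") \
	E_(COLOUR_BLUE,  "blue")

/* First expansion: the enum itself. */
#define EM(a, b) a,
#define E_(a, b) a
enum colour { COLOUR_LIST };
#undef EM
#undef E_

/* Second expansion: a value-to-string table. */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct { int val; const char *name; } colour_names[] = {
	COLOUR_LIST
};
#undef EM
#undef E_

int main(void)
{
	for (unsigned int i = 0; i < sizeof(colour_names) / sizeof(colour_names[0]); i++)
		printf("%d -> %s\n", colour_names[i].val, colour_names[i].name);
	return 0;
}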
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 994d70193432..ddf1a3aafee5 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -909,12 +909,15 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
+ spin_unlock(&server->mid_lock);
cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
__func__, mid->mid, mid->mid_state);
rc = -EIO;
+ goto sync_mid_done;
}
spin_unlock(&server->mid_lock);
+sync_mid_done:
release_mid(mid);
return rc;
}
@@ -1057,9 +1060,11 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
index = (uint)atomic_inc_return(&ses->chan_seq);
index %= ses->chan_count;
}
+
+ server = ses->chans[index].server;
spin_unlock(&ses->chan_lock);
- return ses->chans[index].server;
+ return server;
}
int
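cifs_pick_channel() previously dereferenced ses->chans[index] after dropping chan_lock, leaving a window where a concurrent channel reconfiguration could change the array underneath it; the fix captures the pointer while the lock is still held. The same discipline in a pthread sketch (hypothetical types):

#include <pthread.h>

struct channel;

struct session {
	pthread_mutex_t lock;
	struct channel *chans[8];
	unsigned int chan_count;
};

/* Load the pointer under the lock so a concurrent update of chans[]
 * cannot be observed between the unlock and the use. */
static struct channel *pick_channel(struct session *s, unsigned int seq)
{
	struct channel *ch;

	pthread_mutex_lock(&s->lock);
	ch = s->chans[seq % s->chan_count];
	pthread_mutex_unlock(&s->lock);
	return ch;
}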
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 1b594307c9d5..202ff9128156 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -711,7 +711,7 @@ struct smb2_close_rsp {
__le16 StructureSize; /* 60 */
__le16 Flags;
__le32 Reserved;
- struct_group(network_open_info,
+ struct_group_attr(network_open_info, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
index 8ca8a45c4c62..f4e55199938d 100644
--- a/fs/smb/server/ksmbd_netlink.h
+++ b/fs/smb/server/ksmbd_netlink.h
@@ -167,7 +167,8 @@ struct ksmbd_share_config_response {
__u16 force_uid;
__u16 force_gid;
__s8 share_name[KSMBD_REQ_MAX_SHARE_NAME];
- __u32 reserved[112]; /* Reserved room */
+ __u32 reserved[111]; /* Reserved room */
+ __u32 payload_sz;
__u32 veto_list_sz;
__s8 ____payload[];
};
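Shrinking reserved[112] to reserved[111] to make room for payload_sz keeps struct ksmbd_share_config_response the same size, so old and new user-space daemons still agree on the message layout. When carving a field out of a reserved area like this, a compile-time size assertion is cheap insurance; a sketch with made-up field sizes:

#include <stdint.h>

/* v1 had reserved[112]; v2 steals one word for payload_sz but must not
 * change the overall (wire-visible) size of the structure. */
struct cfg_response_v2 {
	uint16_t force_uid;
	uint16_t force_gid;
	char     share_name[64];
	uint32_t reserved[111];
	uint32_t payload_sz;        /* carved out of reserved[] */
	uint32_t veto_list_sz;
};

_Static_assert(sizeof(struct cfg_response_v2) == 2 + 2 + 64 + 112 * 4 + 4,
	       "new field must not change the wire size");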
@@ -339,23 +340,24 @@ enum KSMBD_TREE_CONN_STATUS {
/*
* Share config flags.
*/
-#define KSMBD_SHARE_FLAG_INVALID (0)
-#define KSMBD_SHARE_FLAG_AVAILABLE BIT(0)
-#define KSMBD_SHARE_FLAG_BROWSEABLE BIT(1)
-#define KSMBD_SHARE_FLAG_WRITEABLE BIT(2)
-#define KSMBD_SHARE_FLAG_READONLY BIT(3)
-#define KSMBD_SHARE_FLAG_GUEST_OK BIT(4)
-#define KSMBD_SHARE_FLAG_GUEST_ONLY BIT(5)
-#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS BIT(6)
-#define KSMBD_SHARE_FLAG_OPLOCKS BIT(7)
-#define KSMBD_SHARE_FLAG_PIPE BIT(8)
-#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES BIT(9)
-#define KSMBD_SHARE_FLAG_INHERIT_OWNER BIT(10)
-#define KSMBD_SHARE_FLAG_STREAMS BIT(11)
-#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12)
-#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13)
-#define KSMBD_SHARE_FLAG_UPDATE BIT(14)
-#define KSMBD_SHARE_FLAG_CROSSMNT BIT(15)
+#define KSMBD_SHARE_FLAG_INVALID (0)
+#define KSMBD_SHARE_FLAG_AVAILABLE BIT(0)
+#define KSMBD_SHARE_FLAG_BROWSEABLE BIT(1)
+#define KSMBD_SHARE_FLAG_WRITEABLE BIT(2)
+#define KSMBD_SHARE_FLAG_READONLY BIT(3)
+#define KSMBD_SHARE_FLAG_GUEST_OK BIT(4)
+#define KSMBD_SHARE_FLAG_GUEST_ONLY BIT(5)
+#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS BIT(6)
+#define KSMBD_SHARE_FLAG_OPLOCKS BIT(7)
+#define KSMBD_SHARE_FLAG_PIPE BIT(8)
+#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES BIT(9)
+#define KSMBD_SHARE_FLAG_INHERIT_OWNER BIT(10)
+#define KSMBD_SHARE_FLAG_STREAMS BIT(11)
+#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12)
+#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13)
+#define KSMBD_SHARE_FLAG_UPDATE BIT(14)
+#define KSMBD_SHARE_FLAG_CROSSMNT BIT(15)
+#define KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY BIT(16)
/*
* Tree connect request flags.
diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
index 328a412259dc..a2f0a2edceb8 100644
--- a/fs/smb/server/mgmt/share_config.c
+++ b/fs/smb/server/mgmt/share_config.c
@@ -158,7 +158,12 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
share->name = kstrdup(name, GFP_KERNEL);
if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
- share->path = kstrdup(ksmbd_share_config_path(resp),
+ int path_len = PATH_MAX;
+
+ if (resp->payload_sz)
+ path_len = resp->payload_sz - resp->veto_list_sz;
+
+ share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
GFP_KERNEL);
if (share->path)
share->path_sz = strlen(share->path);
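share_config_request() previously kstrdup()'d the path straight out of the IPC response, trusting the daemon to have NUL-terminated it; kstrndup() bounds the copy by the payload size the daemon declared, falling back to PATH_MAX for older daemons that send payload_sz == 0. Roughly, in userspace terms (strndup is POSIX.1-2008):

#include <limits.h>
#include <string.h>

/* Duplicate at most the declared number of bytes, so a missing NUL
 * terminator cannot make the copy run off the received buffer. */
static char *dup_share_path(const char *payload, size_t payload_sz,
			    size_t veto_list_sz)
{
	size_t path_len = PATH_MAX;

	if (payload_sz && payload_sz >= veto_list_sz)
		path_len = payload_sz - veto_list_sz;

	return strndup(payload, path_len);
}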
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index c0788188aa82..c67fbc8d6683 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -167,20 +167,17 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
int rc;
bool is_chained = false;
- if (conn->ops->allocate_rsp_buf(work))
- return;
-
if (conn->ops->is_transform_hdr &&
conn->ops->is_transform_hdr(work->request_buf)) {
rc = conn->ops->decrypt_req(work);
- if (rc < 0) {
- conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
- goto send;
- }
-
+ if (rc < 0)
+ return;
work->encrypted = true;
}
+ if (conn->ops->allocate_rsp_buf(work))
+ return;
+
rc = conn->ops->init_rsp_hdr(work);
if (rc) {
/* either uid or tid is not correct */
diff --git a/fs/smb/server/smb2ops.c b/fs/smb/server/smb2ops.c
index a45f7dca482e..606aa3c5189a 100644
--- a/fs/smb/server/smb2ops.c
+++ b/fs/smb/server/smb2ops.c
@@ -228,6 +228,11 @@ void init_smb3_0_server(struct ksmbd_conn *conn)
conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
+ (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
+ conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
+ conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
}
@@ -278,11 +283,6 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
- if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
- (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
- conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
- conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
-
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index d478fa0c57ab..355824151c2d 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -535,6 +535,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
if (cmd == SMB2_QUERY_INFO_HE) {
struct smb2_query_info_req *req;
+ if (get_rfc1002_len(work->request_buf) <
+ offsetof(struct smb2_query_info_req, OutputBufferLength))
+ return -EINVAL;
+
req = smb2_get_msg(work->request_buf);
if ((req->InfoType == SMB2_O_INFO_FILE &&
(req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
@@ -1984,7 +1988,12 @@ int smb2_tree_connect(struct ksmbd_work *work)
write_unlock(&sess->tree_conns_lock);
rsp->StructureSize = cpu_to_le16(16);
out_err1:
- rsp->Capabilities = 0;
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE &&
+ test_share_config_flag(share,
+ KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY))
+ rsp->Capabilities = SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY;
+ else
+ rsp->Capabilities = 0;
rsp->Reserved = 0;
/* default manual caching */
rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
@@ -3498,7 +3507,9 @@ int smb2_open(struct ksmbd_work *work)
memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
if (dh_info.type == DURABLE_REQ_V2 || dh_info.type == DURABLE_REQ) {
- if (dh_info.type == DURABLE_REQ_V2 && dh_info.persistent)
+ if (dh_info.type == DURABLE_REQ_V2 && dh_info.persistent &&
+ test_share_config_flag(work->tcon->share_conf,
+ KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY))
fp->is_persistent = true;
else
fp->is_durable = true;
@@ -5857,8 +5868,9 @@ static int smb2_rename(struct ksmbd_work *work,
if (!file_info->ReplaceIfExists)
flags = RENAME_NOREPLACE;
- smb_break_all_levII_oplock(work, fp, 0);
rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
+ if (!rc)
+ smb_break_all_levII_oplock(work, fp, 0);
out:
kfree(new_name);
return rc;
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index f29bb03f0dc4..8752ac82c557 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -65,6 +65,7 @@ struct ipc_msg_table_entry {
struct hlist_node ipc_table_hlist;
void *response;
+ unsigned int msg_sz;
};
static struct delayed_work ipc_timer_work;
@@ -275,6 +276,7 @@ static int handle_response(int type, void *payload, size_t sz)
}
memcpy(entry->response, payload, sz);
+ entry->msg_sz = sz;
wake_up_interruptible(&entry->wait);
ret = 0;
break;
@@ -453,6 +455,34 @@ out:
return ret;
}
+static int ipc_validate_msg(struct ipc_msg_table_entry *entry)
+{
+ unsigned int msg_sz = entry->msg_sz;
+
+ if (entry->type == KSMBD_EVENT_RPC_REQUEST) {
+ struct ksmbd_rpc_command *resp = entry->response;
+
+ msg_sz = sizeof(struct ksmbd_rpc_command) + resp->payload_sz;
+ } else if (entry->type == KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST) {
+ struct ksmbd_spnego_authen_response *resp = entry->response;
+
+ msg_sz = sizeof(struct ksmbd_spnego_authen_response) +
+ resp->session_key_len + resp->spnego_blob_len;
+ } else if (entry->type == KSMBD_EVENT_SHARE_CONFIG_REQUEST) {
+ struct ksmbd_share_config_response *resp = entry->response;
+
+ if (resp->payload_sz) {
+ if (resp->payload_sz < resp->veto_list_sz)
+ return -EINVAL;
+
+ msg_sz = sizeof(struct ksmbd_share_config_response) +
+ resp->payload_sz;
+ }
+ }
+
+ return entry->msg_sz != msg_sz ? -EINVAL : 0;
+}
+
static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle)
{
struct ipc_msg_table_entry entry;
@@ -477,6 +507,13 @@ static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle
ret = wait_event_interruptible_timeout(entry.wait,
entry.response != NULL,
IPC_WAIT_TIMEOUT);
+ if (entry.response) {
+ ret = ipc_validate_msg(&entry);
+ if (ret) {
+ kvfree(entry.response);
+ entry.response = NULL;
+ }
+ }
out:
down_write(&ipc_msg_table_lock);
hash_del(&entry.ipc_table_hlist);
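ipc_validate_msg() cross-checks the byte count that actually arrived (entry->msg_sz, recorded in handle_response()) against the size implied by the length fields inside the response, and discards mismatched replies before anything parses them. The core idea as a standalone check over a length-prefixed message:

#include <stddef.h>
#include <stdint.h>

struct rpc_response {
	uint32_t payload_sz;
	unsigned char payload[];   /* payload_sz bytes */
};

/* The size the peer claims inside the message must match what was
 * actually received, or a consumer could read past the buffer. */
static int validate_rpc_response(const struct rpc_response *resp,
				 size_t recv_sz)
{
	if (recv_sz < sizeof(*resp))
		return -1;
	if (recv_sz != sizeof(*resp) + resp->payload_sz)
		return -1;
	return 0;
}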
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 22f0f3db3ac9..51b1b0bed616 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -754,10 +754,15 @@ retry:
goto out4;
}
+ /*
+ * Explicitly handle the file-overwrite case, for compatibility with
+ * filesystems that may not support rename flags (e.g. FUSE).
+ */
if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
err = -EEXIST;
goto out4;
}
+ flags &= ~(RENAME_NOREPLACE);
if (old_child == trap) {
err = -EINVAL;
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index aa3411354e66..16bd693d0b3a 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -48,6 +48,10 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
gid_t i_gid;
int err;
+ inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+ if (inode->i_ino == 0)
+ return -EINVAL;
+
err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
if (err)
return err;
@@ -58,7 +62,6 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
i_uid_write(inode, i_uid);
i_gid_write(inode, i_gid);
- inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0);
inode_set_atime(inode, inode_get_mtime_sec(inode), 0);
inode_set_ctime(inode, inode_get_mtime_sec(inode), 0);
diff --git a/fs/super.c b/fs/super.c
index 71d9779c42b1..69ce6c600968 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1515,29 +1515,11 @@ static int fs_bdev_thaw(struct block_device *bdev)
return error;
}
-static void fs_bdev_super_get(void *data)
-{
- struct super_block *sb = data;
-
- spin_lock(&sb_lock);
- sb->s_count++;
- spin_unlock(&sb_lock);
-}
-
-static void fs_bdev_super_put(void *data)
-{
- struct super_block *sb = data;
-
- put_super(sb);
-}
-
const struct blk_holder_ops fs_holder_ops = {
.mark_dead = fs_bdev_mark_dead,
.sync = fs_bdev_sync,
.freeze = fs_bdev_freeze,
.thaw = fs_bdev_thaw,
- .get_holder = fs_bdev_super_get,
- .put_holder = fs_bdev_super_put,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);
@@ -1562,7 +1544,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
* writable from userspace even for a read-only block device.
*/
if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
- fput(bdev_file);
+ bdev_fput(bdev_file);
return -EACCES;
}
@@ -1573,7 +1555,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
if (fc)
warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
- fput(bdev_file);
+ bdev_fput(bdev_file);
return -EBUSY;
}
spin_lock(&sb_lock);
@@ -1693,7 +1675,7 @@ void kill_block_super(struct super_block *sb)
generic_shutdown_super(sb);
if (bdev) {
sync_blockdev(bdev);
- fput(sb->s_bdev_file);
+ bdev_fput(sb->s_bdev_file);
}
}
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 6b7652fb8050..7cd64021d453 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -463,6 +463,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
kn = kernfs_find_and_get(kobj->sd, attr->name);
if (kn)
kernfs_break_active_protection(kn);
+ else
+ kobject_put(kobj);
return kn;
}
EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
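The sysfs fix plugs a reference leak: the function hands its kobject reference over to the caller only when it returns a kernfs node, so the failure path must now drop that reference itself. The general rule, every acquire matched by a release on every exit path, in a toy refcount sketch (hypothetical API):

struct obj { int refs; };

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }
static void obj_put(struct obj *o) { o->refs--; }

/* Returns with an extra reference on @parent only on success. */
static struct obj *lookup_pinned(struct obj *parent, struct obj *child)
{
	obj_get(parent);       /* pin for the duration of the lookup */
	if (!child)
		obj_put(parent);   /* failure: drop the pin we took */
	return child;
}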
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index dc067eeb6387..894c6ca1e500 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -336,6 +336,7 @@ static void update_inode_attr(struct dentry *dentry, struct inode *inode,
/**
* lookup_file - look up a file in the tracefs filesystem
+ * @parent_ei: Pointer to the eventfs_inode that represents the parent of the file
* @dentry: the dentry to look up
* @mode: the permission that the file should have.
* @attr: saved attributes changed by user
@@ -389,6 +390,7 @@ static struct dentry *lookup_file(struct eventfs_inode *parent_ei,
/**
* lookup_dir_entry - look up a dir in the tracefs filesystem
* @dentry: the directory to look up
+ * @pei: Pointer to the parent eventfs_inode if available
* @ei: the eventfs_inode that represents the directory to create
*
* This function will look up a dentry for a directory represented by
@@ -478,16 +480,20 @@ void eventfs_d_release(struct dentry *dentry)
/**
* lookup_file_dentry - create a dentry for a file of an eventfs_inode
+ * @dentry: The parent dentry under which the new file's dentry will be created
* @ei: the eventfs_inode that the file will be created under
* @idx: the index into the entry_attrs[] of the @ei
- * @parent: The parent dentry of the created file.
- * @name: The name of the file to create
* @mode: The mode of the file.
* @data: The data to use to set the inode of the file with on open()
* @fops: The fops of the file to be created.
*
- * Create a dentry for a file of an eventfs_inode @ei and place it into the
- * address located at @e_dentry.
+ * This function creates a dentry for a file associated with an
+ * eventfs_inode @ei. It uses the entry attributes specified by @idx,
+ * if available. The file will have the specified @mode and its inode will be
+ * set up with @data upon open. The file operations will be set to @fops.
+ *
+ * Return: Pointer to the newly created file's dentry, or an error
+ * pointer.
*/
static struct dentry *
lookup_file_dentry(struct dentry *dentry,
diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c
index 2307f8037efc..118dedef8ebe 100644
--- a/fs/vboxsf/file.c
+++ b/fs/vboxsf/file.c
@@ -218,6 +218,7 @@ const struct file_operations vboxsf_reg_fops = {
.release = vboxsf_file_release,
.fsync = noop_fsync,
.splice_read = filemap_splice_read,
+ .setlease = simple_nosetlease,
};
const struct inode_operations vboxsf_reg_iops = {
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
index cabe8ac4fefc..ffb1d565da39 100644
--- a/fs/vboxsf/super.c
+++ b/fs/vboxsf/super.c
@@ -151,11 +151,11 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
if (!sbi->nls) {
vbg_err("vboxsf: Count not load '%s' nls\n", nls_name);
err = -EINVAL;
- goto fail_free;
+ goto fail_destroy_idr;
}
}
- sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL);
+ sbi->bdi_id = ida_alloc(&vboxsf_bdi_ida, GFP_KERNEL);
if (sbi->bdi_id < 0) {
err = sbi->bdi_id;
goto fail_free;
@@ -221,9 +221,10 @@ fail_unmap:
vboxsf_unmap_folder(sbi->root);
fail_free:
if (sbi->bdi_id >= 0)
- ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
+ ida_free(&vboxsf_bdi_ida, sbi->bdi_id);
if (sbi->nls)
unload_nls(sbi->nls);
+fail_destroy_idr:
idr_destroy(&sbi->ino_idr);
kfree(sbi);
return err;
@@ -268,7 +269,7 @@ static void vboxsf_put_super(struct super_block *sb)
vboxsf_unmap_folder(sbi->root);
if (sbi->bdi_id >= 0)
- ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
+ ida_free(&vboxsf_bdi_ida, sbi->bdi_id);
if (sbi->nls)
unload_nls(sbi->nls);
diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c
index 72ac9320e6a3..9515bbf0b54c 100644
--- a/fs/vboxsf/utils.c
+++ b/fs/vboxsf/utils.c
@@ -440,7 +440,6 @@ int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
{
const char *in;
char *out;
- size_t out_len;
size_t out_bound_len;
size_t in_bound_len;
@@ -448,7 +447,6 @@ int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
in_bound_len = utf8_len;
out = name;
- out_len = 0;
/* Reserve space for terminating 0 */
out_bound_len = name_bound_len - 1;
@@ -469,7 +467,6 @@ int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
out += nb;
out_bound_len -= nb;
- out_len += nb;
}
*out = 0;
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index d991eec05436..73a4b895de67 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -530,7 +530,8 @@ xfs_validate_sb_common(
}
if (!xfs_validate_stripe_geometry(mp, XFS_FSB_TO_B(mp, sbp->sb_unit),
- XFS_FSB_TO_B(mp, sbp->sb_width), 0, false))
+ XFS_FSB_TO_B(mp, sbp->sb_width), 0,
+ xfs_buf_daddr(bp) == XFS_SB_DADDR, false))
return -EFSCORRUPTED;
/*
@@ -1323,8 +1324,10 @@ xfs_sb_get_secondary(
}
/*
- * sunit, swidth, sectorsize(optional with 0) should be all in bytes,
- * so users won't be confused by values in error messages.
+ * sunit, swidth and sectorsize (optional, pass 0 to skip the check) are all in
+ * bytes, so users won't be confused by values in error messages. This function
+ * returns false if the stripe geometry is invalid and the caller is unable to
+ * repair the stripe configuration later in the mount process.
*/
bool
xfs_validate_stripe_geometry(
@@ -1332,20 +1335,21 @@ xfs_validate_stripe_geometry(
__s64 sunit,
__s64 swidth,
int sectorsize,
+ bool may_repair,
bool silent)
{
if (swidth > INT_MAX) {
if (!silent)
xfs_notice(mp,
"stripe width (%lld) is too large", swidth);
- return false;
+ goto check_override;
}
if (sunit > swidth) {
if (!silent)
xfs_notice(mp,
"stripe unit (%lld) is larger than the stripe width (%lld)", sunit, swidth);
- return false;
+ goto check_override;
}
if (sectorsize && (int)sunit % sectorsize) {
@@ -1353,21 +1357,21 @@ xfs_validate_stripe_geometry(
xfs_notice(mp,
"stripe unit (%lld) must be a multiple of the sector size (%d)",
sunit, sectorsize);
- return false;
+ goto check_override;
}
if (sunit && !swidth) {
if (!silent)
xfs_notice(mp,
"invalid stripe unit (%lld) and stripe width of 0", sunit);
- return false;
+ goto check_override;
}
if (!sunit && swidth) {
if (!silent)
xfs_notice(mp,
"invalid stripe width (%lld) and stripe unit of 0", swidth);
- return false;
+ goto check_override;
}
if (sunit && (int)swidth % (int)sunit) {
@@ -1375,9 +1379,27 @@ xfs_validate_stripe_geometry(
xfs_notice(mp,
"stripe width (%lld) must be a multiple of the stripe unit (%lld)",
swidth, sunit);
- return false;
+ goto check_override;
}
return true;
+
+check_override:
+ if (!may_repair)
+ return false;
+ /*
+ * During mount, mp->m_dalign will not be set unless the sunit mount
+ * option was set. If it was, ignore the bad on-disk stripe alignment
+ * values and allow the validation code later in the mount process to
+ * overwrite them with the values supplied by the mount options.
+ */
+ if (!mp->m_dalign)
+ return false;
+ if (!silent)
+ xfs_notice(mp,
+"Will try to correct with specified mount options sunit (%d) and swidth (%d)",
+ BBTOB(mp->m_dalign), BBTOB(mp->m_swidth));
+ return true;
}
/*
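xfs_validate_stripe_geometry() now funnels each failed check through check_override instead of returning false immediately: if the administrator passed sunit=/swidth= mount options (visible as a non-zero mp->m_dalign) the bad on-disk values can be corrected later in mount, so validation succeeds with a notice. The control-flow shape, reduced to a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

/* "dalign" stands in for the sunit= mount option being present. */
static bool validate_stripe(long long sunit, long long swidth,
			    bool may_repair, long long dalign)
{
	if (sunit > swidth)
		goto check_override;
	if ((sunit && !swidth) || (!sunit && swidth))
		goto check_override;
	return true;

check_override:
	if (!may_repair || !dalign)
		return false;          /* hard failure: nothing can fix it */
	printf("bad stripe geometry, will retry with mount options\n");
	return true;
}

int main(void)
{
	return !validate_stripe(8, 4, true, 64);   /* repairable -> true */
}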
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 2e8e8d63d4eb..37b1ed1bc209 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -35,8 +35,9 @@ extern int xfs_sb_get_secondary(struct xfs_mount *mp,
struct xfs_trans *tp, xfs_agnumber_t agno,
struct xfs_buf **bpp);
-extern bool xfs_validate_stripe_geometry(struct xfs_mount *mp,
- __s64 sunit, __s64 swidth, int sectorsize, bool silent);
+bool xfs_validate_stripe_geometry(struct xfs_mount *mp,
+ __s64 sunit, __s64 swidth, int sectorsize, bool may_repair,
+ bool silent);
uint8_t xfs_compute_rextslog(xfs_rtbxlen_t rtextents);
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index abff79a77c72..47a20cf5205f 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -1044,9 +1044,7 @@ xchk_irele(
struct xfs_scrub *sc,
struct xfs_inode *ip)
{
- if (current->journal_info != NULL) {
- ASSERT(current->journal_info == sc->tp);
-
+ if (sc->tp) {
/*
* If we are in a transaction, we /cannot/ drop the inode
* ourselves, because the VFS will trigger writeback, which
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 1698507d1ac7..3f428620ebf2 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -503,13 +503,6 @@ xfs_vm_writepages(
{
struct xfs_writepage_ctx wpc = { };
- /*
- * Writing back data in a transaction context can result in recursive
- * transactions. This is bad, so issue a warning and get out of here.
- */
- if (WARN_ON_ONCE(current->journal_info))
- return 0;
-
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 1a18c381127e..f0fa02264eda 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -2030,7 +2030,7 @@ xfs_free_buftarg(
fs_put_dax(btp->bt_daxdev, btp->bt_mount);
/* the main block device is closed by kill_block_super */
if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
- fput(btp->bt_bdev_file);
+ bdev_fput(btp->bt_bdev_file);
kfree(btp);
}
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index e64265bc0b33..74f1812b03cb 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -2039,8 +2039,10 @@ xfs_inodegc_want_queue_work(
* - Memory shrinkers queued the inactivation worker and it hasn't finished.
* - The queue depth exceeds the maximum allowable percpu backlog.
*
- * Note: If the current thread is running a transaction, we don't ever want to
- * wait for other transactions because that could introduce a deadlock.
+ * Note: If we are in a NOFS context here (e.g. the current thread is running
+ * a transaction) then we don't want to block, as inodegc progress may require
+ * filesystem resources we hold to make progress and that could result in a
+ * deadlock. Hence we skip out of here if we are in a scoped NOFS context.
*/
static inline bool
xfs_inodegc_want_flush_work(
@@ -2048,7 +2050,7 @@ xfs_inodegc_want_flush_work(
unsigned int items,
unsigned int shrinker_hits)
{
- if (current->journal_info)
+ if (current->flags & PF_MEMALLOC_NOFS)
return false;
if (shrinker_hits > 0)
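Several hunks in this series stop abusing current->journal_info as an "inside a transaction" marker and instead rely on the scoped NOFS state that xfs_trans_set_context() already establishes via memalloc_nofs_save(); any code can then test PF_MEMALLOC_NOFS to decide whether blocking on filesystem work is safe. A hedged kernel-style sketch of the scoping helper (kernel context, not standalone userspace code):

#include <linux/sched/mm.h>

/* Hypothetical helper: run @fn inside a scoped NOFS section so any
 * allocation it performs cannot recurse into the filesystem, and so
 * callees can detect the scope via PF_MEMALLOC_NOFS. */
static int run_nofs(int (*fn)(void *), void *arg)
{
	unsigned int flags = memalloc_nofs_save();  /* sets PF_MEMALLOC_NOFS */
	int ret = fn(arg);

	memalloc_nofs_restore(flags);
	return ret;
}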
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ea48774f6b76..d55b42b2480d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1301,8 +1301,19 @@ xfs_link(
*/
if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
tdp->i_projid != sip->i_projid)) {
- error = -EXDEV;
- goto error_return;
+ /*
+ * Project quota setup skips special files which can
+ * leave inodes in a PROJINHERIT directory without a
+ * project ID set. We need to allow links to be made
+ * to these "project-less" inodes because userspace
+ * expects them to succeed after project ID setup,
+ * but everything else should be rejected.
+ */
+ if (!special_file(VFS_I(sip)->i_mode) ||
+ sip->i_projid != 0) {
+ error = -EXDEV;
+ goto error_return;
+ }
}
if (!resblks) {
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index c21f10ab0f5d..bce020374c5e 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -485,7 +485,7 @@ xfs_open_devices(
mp->m_logdev_targp = mp->m_ddev_targp;
/* Handle won't be used, drop it */
if (logdev_file)
- fput(logdev_file);
+ bdev_fput(logdev_file);
}
return 0;
@@ -497,10 +497,10 @@ xfs_open_devices(
xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
if (rtdev_file)
- fput(rtdev_file);
+ bdev_fput(rtdev_file);
out_close_logdev:
if (logdev_file)
- fput(logdev_file);
+ bdev_fput(logdev_file);
return error;
}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 3f7e3a09a49f..1636663707dc 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -268,19 +268,14 @@ static inline void
xfs_trans_set_context(
struct xfs_trans *tp)
{
- ASSERT(current->journal_info == NULL);
tp->t_pflags = memalloc_nofs_save();
- current->journal_info = tp;
}
static inline void
xfs_trans_clear_context(
struct xfs_trans *tp)
{
- if (current->journal_info == tp) {
- memalloc_nofs_restore(tp->t_pflags);
- current->journal_info = NULL;
- }
+ memalloc_nofs_restore(tp->t_pflags);
}
static inline void
@@ -288,10 +283,8 @@ xfs_trans_switch_context(
struct xfs_trans *old_tp,
struct xfs_trans *new_tp)
{
- ASSERT(current->journal_info == old_tp);
new_tp->t_pflags = old_tp->t_pflags;
old_tp->t_pflags = 0;
- current->journal_info = new_tp;
}
#endif /* __XFS_TRANS_H__ */
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index c6a124e8d565..964fa7f24003 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -1048,7 +1048,7 @@ static int zonefs_init_zgroup(struct super_block *sb,
zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
zonefs_zgroup_name(ztype),
zgroup->g_nr_zones,
- zgroup->g_nr_zones > 1 ? "s" : "");
+ str_plural(zgroup->g_nr_zones));
return 0;
}
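str_plural() also fixes the zero case that the open-coded ternary got wrong: "n > 1" prints "0 file" for zero items, while the helper pluralizes everything except exactly one. A userspace stand-in:

#include <stdio.h>

/* Mirrors the kernel helper from <linux/string_choices.h>. */
static const char *str_plural(size_t num)
{
	return num == 1 ? "" : "s";
}

int main(void)
{
	for (size_t n = 0; n <= 2; n++)
		printf("%zu file%s\n", n, str_plural(n));
	return 0;
}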
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 5de954e2b18a..e7796f373d0d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -911,17 +911,19 @@ static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
* acpi_dev_hid_uid_match - Match device by supplied HID and UID
* @adev: ACPI device to match.
* @hid2: Hardware ID of the device.
- * @uid2: Unique ID of the device, pass 0 or NULL to not check _UID.
+ * @uid2: Unique ID of the device, pass NULL to not check _UID.
*
* Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
* will be treated as a match. If user wants to validate @uid2, it should be
* done before calling this function.
*
- * Returns: %true if matches or @uid2 is 0 or NULL, %false otherwise.
+ * Returns: %true if matches or @uid2 is NULL, %false otherwise.
*/
#define acpi_dev_hid_uid_match(adev, hid2, uid2) \
(acpi_dev_hid_match(adev, hid2) && \
- (!(uid2) || acpi_dev_uid_match(adev, uid2)))
+ /* Distinguish integer 0 from NULL @uid2 */ \
+ (_Generic(uid2, ACPI_STR_TYPES(!(uid2)), default: 0) || \
+ acpi_dev_uid_match(adev, uid2)))
void acpi_dev_clear_dependencies(struct acpi_device *supplier);
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
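The reworked acpi_dev_hid_uid_match() uses C11 _Generic to tell a NULL string pointer (meaning "skip the UID check") apart from the integer 0, which is now a genuine UID value to compare. ACPI_STR_TYPES is kernel-internal; the underlying type-dispatch trick looks like this:

#include <stdio.h>

/* _Generic selects by static type, so pointer-typed arguments can be
 * distinguished from a plain integer constant such as 0. */
#define IS_STRINGISH(x) _Generic((x),	\
	char *: 1,			\
	const char *: 1,		\
	default: 0)

int main(void)
{
	printf("%d\n", IS_STRINGISH("uid-1"));     /* 1: string literal  */
	printf("%d\n", IS_STRINGISH((char *)0));   /* 1: typed NULL      */
	printf("%d\n", IS_STRINGISH(0));           /* 0: integer zero    */
	return 0;
}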
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 0c0695763bea..d4f581c1e21d 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -294,5 +294,13 @@ do { \
#define io_stop_wc() do { } while (0)
#endif
+/*
+ * Architectures that guarantee an implicit smp_mb() in switch_mm()
+ * can override smp_mb__after_switch_mm.
+ */
+#ifndef smp_mb__after_switch_mm
+# define smp_mb__after_switch_mm() smp_mb()
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 6e794420bd39..b7de3a4eade1 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -156,7 +156,10 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
-#define BUG() do {} while (1)
+#define BUG() do { \
+ do {} while (1); \
+ unreachable(); \
+} while (0)
#endif
#ifndef HAVE_ARCH_BUG_ON
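With CONFIG_BUG=n, BUG() degrades to a bare infinite loop; appending unreachable() tells the optimizer that control never leaves the macro, which silences "control reaches end of non-void function" style warnings in callers that treat BUG() as terminal. In miniature, using the GCC/Clang builtin behind the kernel's unreachable():

#define MY_BUG() do {			\
	for (;;)			\
		;			\
	__builtin_unreachable();	\
} while (0)

/* No return statement after MY_BUG() is needed: the compiler now knows
 * this point is never reached. */
static int checked_value(int v)
{
	if (v > 0)
		return v;
	MY_BUG();
}

int main(void)
{
	return checked_value(7) != 7;
}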
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
deleted file mode 100644
index 570cd4da7210..000000000000
--- a/include/asm-generic/export.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef __ASM_GENERIC_EXPORT_H
-#define __ASM_GENERIC_EXPORT_H
-
-/*
- * <asm/export.h> and <asm-generic/export.h> are deprecated.
- * Please include <linux/export.h> directly.
- */
-#include <linux/export.h>
-
-#endif
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index 87e3d49a4e29..814207e7c37f 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -512,13 +512,9 @@ struct hv_proximity_domain_flags {
u32 proximity_info_valid : 1;
} __packed;
-/* Not a union in windows but useful for zeroing */
-union hv_proximity_domain_info {
- struct {
- u32 domain_id;
- struct hv_proximity_domain_flags flags;
- };
- u64 as_uint64;
+struct hv_proximity_domain_info {
+ u32 domain_id;
+ struct hv_proximity_domain_flags flags;
} __packed;
struct hv_lp_startup_status {
@@ -532,14 +528,13 @@ struct hv_lp_startup_status {
} __packed;
/* HvAddLogicalProcessor hypercall */
-struct hv_add_logical_processor_in {
+struct hv_input_add_logical_processor {
u32 lp_index;
u32 apic_id;
- union hv_proximity_domain_info proximity_domain_info;
- u64 flags;
+ struct hv_proximity_domain_info proximity_domain_info;
} __packed;
-struct hv_add_logical_processor_out {
+struct hv_output_add_logical_processor {
struct hv_lp_startup_status startup_status;
} __packed;
@@ -560,7 +555,7 @@ struct hv_create_vp {
u8 padding[3];
u8 subnode_type;
u64 subnode_id;
- union hv_proximity_domain_info proximity_domain_info;
+ struct hv_proximity_domain_info proximity_domain_info;
u64 flags;
} __packed;
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 99935779682d..8fe7aaab2599 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
+#include <acpi/acpi_numa.h>
#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <asm/ptrace.h>
@@ -67,6 +68,19 @@ extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);
+static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
+{
+ struct hv_proximity_domain_info pxm_info = {};
+
+ if (node != NUMA_NO_NODE) {
+ pxm_info.domain_id = node_to_pxm(node);
+ pxm_info.flags.proximity_info_valid = 1;
+ pxm_info.flags.proximity_preferred = 1;
+ }
+
+ return pxm_info;
+}
+
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
{
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 724c45e3e9a7..9be85b821aa6 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -22,6 +22,9 @@
#ifndef __AMD_ASIC_TYPE_H__
#define __AMD_ASIC_TYPE_H__
+
+#include <linux/types.h>
+
/*
* Supported ASIC types
*/
diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
index e0c105051246..9764d6eb5beb 100644
--- a/include/drm/bridge/samsung-dsim.h
+++ b/include/drm/bridge/samsung-dsim.h
@@ -11,9 +11,11 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_of.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+struct platform_device;
struct samsung_dsim;
#define DSIM_STATE_ENABLED BIT(0)
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 4891bd916d26..0b032faa8cf2 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -1150,6 +1150,8 @@
#define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */
# define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0)
+# define DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE GENMASK(1, 0)
+# define DP_ADAPTIVE_SYNC_SDP_LENGTH GENMASK(5, 0)
# define DP_AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED (1 << 1)
# define DP_VSC_EXT_SDP_FRAMEWORK_VERSION_1_SUPPORTED (1 << 4)
@@ -1639,10 +1641,12 @@ enum drm_dp_phy {
#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */
#define DP_SDP_ISRC 0x06 /* DP 1.2 */
#define DP_SDP_VSC 0x07 /* DP 1.2 */
+#define DP_SDP_ADAPTIVE_SYNC 0x22 /* DP 1.4 */
#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */
#define DP_SDP_PPS 0x10 /* DP 1.4 */
#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */
#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
+
/* 0x80+ CEA-861 infoframe types */
#define DP_SDP_AUDIO_INFOFRAME_HB2 0x1b
@@ -1798,4 +1802,11 @@ enum dp_content_type {
DP_CONTENT_TYPE_GAME = 0x04,
};
+enum operation_mode {
+ DP_AS_SDP_AVT_DYNAMIC_VTOTAL = 0x00,
+ DP_AS_SDP_AVT_FIXED_VTOTAL = 0x01,
+ DP_AS_SDP_FAVT_TRR_NOT_REACHED = 0x02,
+ DP_AS_SDP_FAVT_TRR_REACHED = 0x03
+};
+
#endif /* _DRM_DP_H_ */
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index a62fcd051d4d..8bed890eec2c 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -98,9 +98,39 @@ struct drm_dp_vsc_sdp {
enum dp_content_type content_type;
};
+/**
+ * struct drm_dp_as_sdp - drm DP Adaptive Sync SDP
+ *
+ * This structure represents a DRM DP Adaptive Sync SDP.
+ * It is based on the DP 2.1 spec [Table 2-126: Adaptive-Sync SDP Header Bytes]
+ * and [Table 2-127: Adaptive-Sync SDP Payload for DB0 through DB8].
+ *
+ * @sdp_type: Secondary-data packet type
+ * @revision: Revision Number
+ * @length: Number of valid data bytes
+ * @vtotal: Minimum Vertical Vtotal
+ * @target_rr: Target Refresh Rate
+ * @duration_incr_ms: Successive frame duration increase
+ * @duration_decr_ms: Successive frame duration decrease
+ * @mode: Adaptive Sync Operation Mode
+ */
+struct drm_dp_as_sdp {
+ unsigned char sdp_type;
+ unsigned char revision;
+ unsigned char length;
+ int vtotal;
+ int target_rr;
+ int duration_incr_ms;
+ int duration_decr_ms;
+ enum operation_mode mode;
+};
+
+void drm_dp_as_sdp_log(struct drm_printer *p,
+ const struct drm_dp_as_sdp *as_sdp);
void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc);
bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
@@ -222,6 +252,12 @@ drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_128b132b_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B;
+}
+
+static inline bool
drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_EDP_CONFIGURATION_CAP] &
@@ -422,7 +458,18 @@ struct drm_dp_aux {
* @wait_hpd_asserted: wait for HPD to be asserted
*
* This is mainly useful for eDP panels drivers to wait for an eDP
- * panel to finish powering on. This is an optional function.
+ * panel to finish powering on. It is optional for DP AUX controllers
+ * to implement this function. It is required for DP AUX endpoints
+ * (panel drivers) to call this function after powering up but before
+ * doing AUX transfers unless the DP AUX endpoint driver knows that
+ * we're not using the AUX controller's HPD. One example of the panel
+ * driver not needing to call this is if HPD is hooked up to a GPIO
+ * that the panel driver can read directly.
+ *
+ * If a DP AUX controller does not implement this function then it
+ * may still support eDP panels that use the AUX controller's built-in
+ * HPD signal by implementing a long wait for HPD in the transfer()
+ * callback, though this is deprecated.
*
* This function will efficiently wait for the HPD signal to be
* asserted. The `wait_us` parameter that is passed in says that we
@@ -722,7 +769,7 @@ static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel,
#endif
-#ifdef CONFIG_DRM_DP_CEC
+#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CEC
void drm_dp_cec_irq(struct drm_dp_aux *aux);
void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
struct drm_connector *connector);
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index 9b19d8bd520a..3546b58a121b 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -83,7 +83,6 @@ struct drm_dp_mst_branch;
* @passthrough_aux: parent aux to which DSC pass-through requests should be
* sent, only set if DSC pass-through is possible.
* @parent: branch device parent of this port
- * @vcpi: Virtual Channel Payload info for this port.
* @connector: DRM connector this port is connected to. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
* @mgr: topology manager this port lives under.
@@ -818,7 +817,28 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
-bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+/**
+ * enum drm_dp_mst_mode - sink's MST mode capability
+ */
+enum drm_dp_mst_mode {
+ /**
+ * @DRM_DP_SST: The sink does not support MST nor single stream sideband
+ * messaging.
+ */
+ DRM_DP_SST,
+ /**
+ * @DRM_DP_MST: Sink supports MST, more than one stream and single
+ * stream sideband messaging.
+ */
+ DRM_DP_MST,
+ /**
+ * @DRM_DP_SST_SIDEBAND_MSG: Sink supports only one stream and single
+ * stream sideband messaging.
+ */
+ DRM_DP_SST_SIDEBAND_MSG,
+};
+
+enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
@@ -928,6 +948,13 @@ int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *n
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
+static inline
+bool drm_dp_mst_port_is_logical(struct drm_dp_mst_port *port)
+{
+ return port->port_num >= DP_MST_LOGICAL_PORT_0;
+}
+
+struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port);
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
static inline struct drm_dp_mst_topology_state *
diff --git a/include/drm/display/drm_dsc.h b/include/drm/display/drm_dsc.h
index bc90273d06a6..bbbe7438473d 100644
--- a/include/drm/display/drm_dsc.h
+++ b/include/drm/display/drm_dsc.h
@@ -40,9 +40,6 @@
#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11
#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6
#define DSC_PPS_NATIVE_420_SHIFT 1
-#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16
-#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0
-#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13
/**
* struct drm_dsc_rc_range_parameters - DSC Rate Control range parameters
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index a5b39fc01003..82570f77e817 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -25,6 +25,8 @@
#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
+#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3)
+#define DRM_BUDDY_CLEARED BIT(4)
struct drm_buddy_block {
#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
@@ -32,8 +34,9 @@ struct drm_buddy_block {
#define DRM_BUDDY_ALLOCATED (1 << 10)
#define DRM_BUDDY_FREE (2 << 10)
#define DRM_BUDDY_SPLIT (3 << 10)
+#define DRM_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9)
/* Free to be used, if needed in the future */
-#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
+#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6)
#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
u64 header;
@@ -86,6 +89,7 @@ struct drm_buddy {
u64 chunk_size;
u64 size;
u64 avail;
+ u64 clear_avail;
};
static inline u64
@@ -113,6 +117,12 @@ drm_buddy_block_is_allocated(struct drm_buddy_block *block)
}
static inline bool
+drm_buddy_block_is_clear(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_CLEAR;
+}
+
+static inline bool
drm_buddy_block_is_free(struct drm_buddy_block *block)
{
return drm_buddy_block_state(block) == DRM_BUDDY_FREE;
@@ -150,7 +160,9 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
-void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects);
+void drm_buddy_free_list(struct drm_buddy *mm,
+ struct list_head *objects,
+ unsigned int flags);
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
void drm_buddy_block_print(struct drm_buddy *mm,
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index d47458ecdac4..bc0e66f9c425 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -141,6 +141,13 @@ struct drm_client_buffer {
/**
* @gem: GEM object backing this buffer
+ *
+ * FIXME: The dependency on GEM here isn't required, we could
+ * convert the driver handle to a dma-buf instead and use the
+ * backend-agnostic dma-buf vmap support. This would
+ * require that the handle2fd prime ioctl is reworked to pull the
+ * fd_install step out of the driver backend hooks, to make that
+ * final step optional for internal users.
*/
struct drm_gem_object *gem;
@@ -159,6 +166,9 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
+int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
+ struct iosys_map *map_copy);
+void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer);
int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
struct iosys_map *map);
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
index b225eeb30d05..1b4c98c2f838 100644
--- a/include/drm/drm_debugfs_crc.h
+++ b/include/drm/drm_debugfs_crc.h
@@ -22,13 +22,19 @@
#ifndef __DRM_DEBUGFS_CRC_H__
#define __DRM_DEBUGFS_CRC_H__
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct drm_crtc;
+
#define DRM_MAX_CRC_NR 10
/**
* struct drm_crtc_crc_entry - entry describing a frame's content
* @has_frame_counter: whether the source was able to provide a frame number
* @frame: number of the frame this CRC is about, if @has_frame_counter is true
- * @crc: array of values that characterize the frame
+ * @crcs: array of values that characterize the frame
*/
struct drm_crtc_crc_entry {
bool has_frame_counter;
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 7923bc00dc7a..b085525e53e2 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -30,6 +30,7 @@ struct drm_connector;
struct drm_device;
struct drm_display_mode;
struct drm_edid;
+struct drm_printer;
struct hdmi_avi_infoframe;
struct hdmi_vendor_infoframe;
struct i2c_adapter;
@@ -272,14 +273,27 @@ struct detailed_timing {
#define DRM_EDID_DSC_MAX_SLICES 0xf
#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
+struct drm_edid_product_id {
+ __be16 manufacturer_name;
+ __le16 product_code;
+ __le32 serial_number;
+ u8 week_of_manufacture;
+ u8 year_of_manufacture;
+} __packed;
+
struct edid {
u8 header[8];
/* Vendor & product info */
- u8 mfg_id[2];
- u8 prod_code[2];
- u32 serial; /* FIXME: byte order */
- u8 mfg_week;
- u8 mfg_year;
+ union {
+ struct drm_edid_product_id product_id;
+ struct {
+ u8 mfg_id[2];
+ u8 prod_code[2];
+ u32 serial; /* FIXME: byte order */
+ u8 mfg_week;
+ u8 mfg_year;
+ } __packed;
+ } __packed;
/* EDID version */
u8 version;
u8 revision;
@@ -312,6 +326,13 @@ struct edid {
u8 checksum;
} __packed;
+/* EDID matching */
+struct drm_edid_ident {
+ /* ID encoded by drm_edid_encode_panel_id() */
+ u32 panel_id;
+ const char *name;
+};
+
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
/* Short Audio Descriptor */
@@ -327,8 +348,6 @@ int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
-bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2);
-
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
const struct drm_connector *connector,
@@ -410,7 +429,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
-u32 drm_edid_get_panel_id(struct i2c_adapter *adapter);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
@@ -450,14 +468,19 @@ const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector,
const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector,
int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len),
void *context);
+const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter);
const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
int drm_edid_connector_update(struct drm_connector *connector,
const struct drm_edid *edid);
int drm_edid_connector_add_modes(struct drm_connector *connector);
bool drm_edid_is_digital(const struct drm_edid *drm_edid);
-
-const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid,
- int ext_id, int *ext_index);
+void drm_edid_get_product_id(const struct drm_edid *drm_edid,
+ struct drm_edid_product_id *id);
+void drm_edid_print_product_id(struct drm_printer *p,
+ const struct drm_edid_product_id *id, bool raw);
+u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid);
+bool drm_edid_match(const struct drm_edid *drm_edid,
+ const struct drm_edid_ident *ident);
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index 7214101fd731..49172166a164 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -34,12 +34,6 @@
/**
* struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver
- * @set_config: Initialize any encoder-specific modesetting parameters.
- * The meaning of the @params parameter is implementation
- * dependent. It will usually be a structure with DVO port
- * data format settings or timings. It's not required for
- * the new parameters to take effect until the next mode
- * is set.
*
* Most of its members are analogous to the function pointers in
* &drm_encoder_helper_funcs and they can optionally be used to
@@ -48,41 +42,85 @@
* if the encoder is the currently selected one for the connector.
*/
struct drm_encoder_slave_funcs {
+ /**
+ * @set_config: Initialize any encoder-specific modesetting parameters.
+ * The meaning of the @params parameter is implementation dependent. It
+ * will usually be a structure with DVO port data format settings or
+ * timings. It's not required for the new parameters to take effect
+ * until the next mode is set.
+ */
void (*set_config)(struct drm_encoder *encoder,
void *params);
+ /**
+ * @destroy: Analogous to &drm_encoder_funcs @destroy callback.
+ */
void (*destroy)(struct drm_encoder *encoder);
+
+ /**
+ * @dpms: Analogous to &drm_encoder_helper_funcs @dpms callback. Wrapped
+ * by drm_i2c_encoder_dpms().
+ */
void (*dpms)(struct drm_encoder *encoder, int mode);
+
+ /**
+ * @save: Save state. Wrapped by drm_i2c_encoder_save().
+ */
void (*save)(struct drm_encoder *encoder);
+
+ /**
+ * @restore: Restore state. Wrapped by drm_i2c_encoder_restore().
+ */
void (*restore)(struct drm_encoder *encoder);
+
+ /**
+ * @mode_fixup: Analogous to &drm_encoder_helper_funcs @mode_fixup
+ * callback. Wrapped by drm_i2c_encoder_mode_fixup().
+ */
bool (*mode_fixup)(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+
+ /**
+ * @mode_valid: Analogous to &drm_encoder_helper_funcs @mode_valid.
+ */
int (*mode_valid)(struct drm_encoder *encoder,
struct drm_display_mode *mode);
+ /**
+ * @mode_set: Analogous to &drm_encoder_helper_funcs @mode_set
+ * callback. Wrapped by drm_i2c_encoder_mode_set().
+ */
void (*mode_set)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+ /**
+ * @detect: Analogous to &drm_encoder_helper_funcs @detect
+ * callback. Wrapped by drm_i2c_encoder_detect().
+ */
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
+ /**
+ * @get_modes: Get modes.
+ */
int (*get_modes)(struct drm_encoder *encoder,
struct drm_connector *connector);
+ /**
+ * @create_resources: Create resources.
+ */
int (*create_resources)(struct drm_encoder *encoder,
struct drm_connector *connector);
+ /**
+ * @set_property: Set property.
+ */
int (*set_property)(struct drm_encoder *encoder,
struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
-
};
/**
* struct drm_encoder_slave - Slave encoder struct
- * @base: DRM encoder object.
- * @slave_funcs: Slave encoder callbacks.
- * @slave_priv: Slave encoder private data.
- * @bus_priv: Bus specific data.
*
* A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the
* ones in @base. The former are never actually called by the common
@@ -95,10 +133,24 @@ struct drm_encoder_slave_funcs {
* this.
*/
struct drm_encoder_slave {
+ /**
+ * @base: DRM encoder object.
+ */
struct drm_encoder base;
+ /**
+ * @slave_funcs: Slave encoder callbacks.
+ */
const struct drm_encoder_slave_funcs *slave_funcs;
+
+ /**
+ * @slave_priv: Slave encoder private data.
+ */
void *slave_priv;
+
+ /**
+ * @bus_priv: Bus specific data.
+ */
void *bus_priv;
};
#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base)
@@ -112,16 +164,20 @@ int drm_i2c_encoder_init(struct drm_device *dev,
/**
* struct drm_i2c_encoder_driver
*
- * Describes a device driver for an encoder connected to the GPU
- * through an I2C bus. In addition to the entry points in @i2c_driver
- * an @encoder_init function should be provided. It will be called to
- * give the driver an opportunity to allocate any per-encoder data
- * structures and to initialize the @slave_funcs and (optionally)
- * @slave_priv members of @encoder.
+ * Describes a device driver for an encoder connected to the GPU through an I2C
+ * bus.
*/
struct drm_i2c_encoder_driver {
+ /**
+ * @i2c_driver: I2C device driver description.
+ */
struct i2c_driver i2c_driver;
+ /**
+ * @encoder_init: Callback to allocate any per-encoder data structures
+ * and to initialize the @slave_funcs and (optionally) @slave_priv
+ * members of @encoder.
+ */
int (*encoder_init)(struct i2c_client *client,
struct drm_device *dev,
struct drm_encoder_slave *encoder);
@@ -133,6 +189,7 @@ struct drm_i2c_encoder_driver {
/**
* drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder
+ * @encoder: The encoder
*/
static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder)
{
diff --git a/include/drm/drm_fb_dma_helper.h b/include/drm/drm_fb_dma_helper.h
index d5e036c57801..c950732c6d36 100644
--- a/include/drm/drm_fb_dma_helper.h
+++ b/include/drm/drm_fb_dma_helper.h
@@ -6,7 +6,9 @@
struct drm_device;
struct drm_framebuffer;
+struct drm_plane;
struct drm_plane_state;
+struct drm_scanout_buffer;
struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
@@ -19,5 +21,8 @@ void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
struct drm_plane_state *old_state,
struct drm_plane_state *state);
+int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
#endif
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index f13b34e0b752..428d81afe215 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -25,6 +25,7 @@ struct iosys_map;
* All fields are considered private.
*/
struct drm_format_conv_state {
+ /* private: */
struct {
void *mem;
size_t size;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 2ebec3984cd4..bae4865b2101 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -527,6 +527,9 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
+void drm_gem_lock(struct drm_gem_object *obj);
+void drm_gem_unlock(struct drm_gem_object *obj);
+
int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index bf0c31aa8fbe..efbc9f27312b 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -108,6 +108,9 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
struct iosys_map *map);
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
+int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem);
+
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
@@ -173,7 +176,7 @@ static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- return drm_gem_shmem_pin(shmem);
+ return drm_gem_shmem_pin_locked(shmem);
}
/**
@@ -187,7 +190,7 @@ static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_gem_shmem_unpin(shmem);
+ drm_gem_shmem_unpin_locked(shmem);
}
/**
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 2938ba80750d..9a73f786f4ad 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -170,7 +170,6 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb(
* @vram_base: Base address of the managed video memory
* @vram_size: Size of the managed video memory in bytes
* @bdev: The TTM BO device.
- * @funcs: TTM BO functions
*
* The fields &struct drm_vram_mm.vram_base and
* &struct drm_vram_mm.vram_size are managed by VRAM MM, but are
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index 6e99627edf45..e7cc17ee4934 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -75,7 +75,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
* @_dev: The parent device object
* @_type: the type of the struct which contains struct &drm_device
* @_member: the name of the &drm_device within @_type.
- * @_features: Mocked DRM device driver features
+ * @_feat: Mocked DRM device driver features
*
* This function creates a struct &drm_driver and will create a struct
* &drm_device from @_dev and that driver.
diff --git a/include/drm/drm_lease.h b/include/drm/drm_lease.h
index 5c9ef6a2aeae..53545b4ca9ef 100644
--- a/include/drm/drm_lease.h
+++ b/include/drm/drm_lease.h
@@ -6,6 +6,8 @@
#ifndef _DRM_LEASE_H_
#define _DRM_LEASE_H_
+#include <linux/types.h>
+
struct drm_file;
struct drm_device;
struct drm_master;
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index c0aec0d4d664..82b1cc434ea3 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -226,6 +226,12 @@ static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt)
return -EINVAL;
}
+enum mipi_dsi_compression_algo {
+ MIPI_DSI_COMPRESSION_DSC = 0,
+ MIPI_DSI_COMPRESSION_VENDOR = 3,
+ /* other two values are reserved, DSI 1.3 */
+};
+
struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info);
@@ -241,9 +247,12 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
u16 value);
-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
- const struct drm_dsc_picture_parameter_set *pps);
+int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
+int mipi_dsi_compression_mode_ext(struct mipi_dsi_device *dsi, bool enable,
+ enum mipi_dsi_compression_algo algo,
+ unsigned int pps_selector);
+int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps);
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 973119a9176b..8de3c9a5f61b 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -506,6 +506,16 @@ struct drm_mode_config {
struct list_head plane_list;
/**
+ * @panic_lock:
+ *
+ * Raw spinlock used to protect critical sections of code that access
+ * the display hardware or modeset software state, which the panic
+ * printing code must be protected against. See drm_panic_trylock(),
+ * drm_panic_lock() and drm_panic_unlock().
+ */
+ struct raw_spinlock panic_lock;
+
+ /**
* @num_crtc:
*
* Number of CRTCs on this device linked with &drm_crtc.head. This is invariant over the lifetime
@@ -942,6 +952,11 @@ struct drm_mode_config {
*/
struct drm_property *modifiers_property;
+ /**
+ * @size_hints_property: Plane SIZE_HINTS property.
+ */
+ struct drm_property *size_hints_property;
+
/* cursor size */
uint32_t cursor_width, cursor_height;
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 9ed42469540e..ec59015aec3c 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -48,6 +48,7 @@
* To make this clear all the helper vtables are pulled together in this location here.
*/
+struct drm_scanout_buffer;
struct drm_writeback_connector;
struct drm_writeback_job;
@@ -1443,6 +1444,44 @@ struct drm_plane_helper_funcs {
*/
void (*atomic_async_update)(struct drm_plane *plane,
struct drm_atomic_state *state);
+
+ /**
+ * @get_scanout_buffer:
+ *
+ * Get the current scanout buffer, to display a message with drm_panic.
+ * The driver should make only the minimal changes needed to provide a
+ * buffer that can be used to display the panic screen. Currently only
+ * linear buffers are supported. Non-linear buffer support is on the
+ * TODO list.
+ * The device &dev.mode_config.panic_lock is taken before calling this
+ * function, so you can safely access the &plane.state.
+ * It is called from a panic callback, and must follow its restrictions.
+ * Please see the documentation of drm_panic_trylock() for an in-depth
+ * discussion of what is safe and what is not allowed.
+ * It's a best effort mode, so it's expected that in some complex cases
+ * the panic screen won't be displayed.
+ * The returned &drm_scanout_buffer.map must be valid if no error code is
+ * returned.
+ *
+ * Return:
+ * %0 on success, negative errno on failure.
+ */
+ int (*get_scanout_buffer)(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
+ /**
+ * @panic_flush:
+ *
+ * It is used by drm_panic, and is called after the panic screen is
+ * drawn to the scanout buffer. In this function, the driver
+ * can send additional commands to the hardware, to make the scanout
+ * buffer visible.
+ * It is only called if get_scanout_buffer() returned successfully, and
+ * the &dev.mode_config.panic_lock is held during the entire sequence.
+ * It is called from a panic callback, and must follow its restrictions.
+ * Please see the documentation of drm_panic_trylock() for an in-depth
+ * discussion of what is safe and what is not allowed.
+ */
+ void (*panic_flush)(struct drm_plane *plane);
};
/**
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 082a6e980d01..02d1cdd7f798 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -2,6 +2,7 @@
#ifndef __DRM_OF_H__
#define __DRM_OF_H__
+#include <linux/err.h>
#include <linux/of_graph.h>
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
#include <drm/drm_bridge.h>
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
new file mode 100644
index 000000000000..822dbb1aa9d6
--- /dev/null
+++ b/include/drm/drm_panic.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+#ifndef __DRM_PANIC_H__
+#define __DRM_PANIC_H__
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/iosys-map.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_fourcc.h>
+/*
+ * Copyright (c) 2024 Intel
+ */
+
+/**
+ * struct drm_scanout_buffer - DRM scanout buffer
+ *
+ * This structure holds the information necessary for drm_panic to draw the
+ * panic screen and display it.
+ */
+struct drm_scanout_buffer {
+ /**
+ * @format:
+ *
+ * drm format of the scanout buffer.
+ */
+ const struct drm_format_info *format;
+
+ /**
+ * @map:
+ *
+ * Virtual address of the scanout buffer, either in memory or iomem.
+ * The scanout buffer should be in linear format, and can be directly
+ * sent to the display hardware. Tearing is not an issue for the panic
+ * screen.
+ */
+ struct iosys_map map[DRM_FORMAT_MAX_PLANES];
+
+ /**
+ * @width: Width of the scanout buffer, in pixels.
+ */
+ unsigned int width;
+
+ /**
+ * @height: Height of the scanout buffer, in pixels.
+ */
+ unsigned int height;
+
+ /**
+ * @pitch: Length in bytes between the start of two consecutive lines.
+ */
+ unsigned int pitch[DRM_FORMAT_MAX_PLANES];
+};
+
+/**
+ * drm_panic_trylock - try to enter the panic printing critical section
+ * @dev: struct drm_device
+ * @flags: unsigned long irq flags you need to pass to the unlock() counterpart
+ *
+ * This function must be called by any panic printing code. The panic printing
+ * attempt must be aborted if the trylock fails.
+ *
+ * Panic printing code can make the following assumptions while holding the
+ * panic lock:
+ *
+ * - Anything protected by drm_panic_lock() and drm_panic_unlock() pairs is safe
+ * to access.
+ *
+ * - Furthermore the panic printing code only registers in drm_dev_register()
+ * and gets removed in drm_dev_unregister(). This allows the panic code to
+ * safely access any state which is invariant in between these two function
+ * calls, like the list of planes &drm_mode_config.plane_list or most of the
+ * struct drm_plane structure.
+ *
+ * Specifically thanks to the protection around plane updates in
+ * drm_atomic_helper_swap_state() the following additional guarantees hold:
+ *
+ * - It is safe to dereference the drm_plane.state pointer.
+ *
+ * - Anything in struct drm_plane_state or the driver's subclass thereof which
+ * stays invariant after the atomic check code has finished is safe to access.
+ * Specifically this includes the reference counted pointers to framebuffer
+ * and buffer objects.
+ *
+ * - Anything set up by &drm_plane_helper_funcs.prepare_fb and cleaned up by
+ * &drm_plane_helper_funcs.cleanup_fb is safe to access, as long as it stays
+ * invariant between these two calls. This also means that for drivers using
+ * dynamic buffer management the framebuffer is pinned, and therefore all
+ * relevant data structures can be accessed without taking any further locks
+ * (which would be impossible in panic context anyway).
+ *
+ * - Importantly, software and hardware state set up by
+ * &drm_plane_helper_funcs.begin_fb_access and
+ * &drm_plane_helper_funcs.end_fb_access is not safe to access.
+ *
+ * Drivers must not make any assumptions about the actual state of the hardware,
+ * unless they explicitly protect these hardware accesses with drm_panic_lock()
+ * and drm_panic_unlock().
+ *
+ * Return:
+ * %0 when failing to acquire the raw spinlock, nonzero on success.
+ */
+#define drm_panic_trylock(dev, flags) \
+ raw_spin_trylock_irqsave(&(dev)->mode_config.panic_lock, flags)
+
+/**
+ * drm_panic_lock - protect panic printing relevant state
+ * @dev: struct drm_device
+ * @flags: unsigned long irq flags you need to pass to the unlock() counterpart
+ *
+ * This function must be called to protect software and hardware state that the
+ * panic printing code must be able to rely on. The protected sections must be
+ * as small as possible. It uses the irqsave/irqrestore variant, and can be
+ * called from an irq handler. Examples include:
+ *
+ * - Access to peek/poke or other similar registers, if that is the way the
+ * driver prints the pixels into the scanout buffer at panic time.
+ *
+ * - Updates to pointers like &drm_plane.state, allowing the panic handler to
+ * safely dereference these. This is done in drm_atomic_helper_swap_state().
+ *
+ * - Any state that isn't invariant and that the driver must be able to access
+ * during panic printing.
+ */
+#define drm_panic_lock(dev, flags) \
+ raw_spin_lock_irqsave(&(dev)->mode_config.panic_lock, flags)
+
+/**
+ * drm_panic_unlock - end of the panic printing critical section
+ * @dev: struct drm_device
+ * @flags: irq flags that were returned when acquiring the lock
+ *
+ * Unlocks the raw spinlock acquired by either drm_panic_lock() or
+ * drm_panic_trylock().
+ */
+#define drm_panic_unlock(dev, flags) \
+ raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags)
+
+#ifdef CONFIG_DRM_PANIC
+
+void drm_panic_register(struct drm_device *dev);
+void drm_panic_unregister(struct drm_device *dev);
+
+#else
+
+static inline void drm_panic_register(struct drm_device *dev) {}
+static inline void drm_panic_unregister(struct drm_device *dev) {}
+
+#endif
+
+#endif /* __DRM_PANIC_H__ */
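
To illustrate how the pieces above fit together, here is a minimal sketch of a driver wiring up the panic hooks. The mydrv_* names and the CPU-mapped scanout pointer are hypothetical; a real implementation must respect the panic-context restrictions documented at drm_panic_trylock():

static int mydrv_get_scanout_buffer(struct drm_plane *plane,
				    struct drm_scanout_buffer *sb)
{
	struct drm_framebuffer *fb = plane->state ? plane->state->fb : NULL;
	struct mydrv_device *mdev = to_mydrv_device(plane->dev);	/* hypothetical */

	if (!fb)
		return -ENODEV;

	/* Describe the (linear) framebuffer currently being scanned out. */
	sb->format = fb->format;
	sb->width = fb->width;
	sb->height = fb->height;
	sb->pitch[0] = fb->pitches[0];
	iosys_map_set_vaddr(&sb->map[0], mdev->vram);	/* CPU-mapped scanout */
	return 0;
}

static const struct drm_plane_helper_funcs mydrv_plane_helper_funcs = {
	/* ... the usual atomic plane hooks ... */
	.get_scanout_buffer = mydrv_get_scanout_buffer,
};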
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 641fe298052d..9507542121fa 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/ctype.h>
+#include <linux/kmsg_dump.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_rect.h>
@@ -32,6 +33,7 @@
#include <drm/drm_util.h>
struct drm_crtc;
+struct drm_plane_size_hint;
struct drm_printer;
struct drm_modeset_acquire_ctx;
@@ -779,6 +781,11 @@ struct drm_plane {
* @hotspot_y_property: property to set mouse hotspot y offset.
*/
struct drm_property *hotspot_y_property;
+
+ /**
+ * @kmsg_panic: Used to register a panic notifier for this plane
+ */
+ struct kmsg_dumper kmsg_panic;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
@@ -976,5 +983,8 @@ drm_plane_get_damage_clips(const struct drm_plane_state *state);
int drm_plane_create_scaling_filter_property(struct drm_plane *plane,
unsigned int supported_filters);
+int drm_plane_add_size_hints_property(struct drm_plane *plane,
+ const struct drm_plane_size_hint *hints,
+ int num_hints);
#endif
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 9cc473e5d353..089950ad8681 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -28,14 +28,14 @@
#include <linux/compiler.h>
#include <linux/printk.h>
-#include <linux/seq_file.h>
#include <linux/device.h>
-#include <linux/debugfs.h>
#include <linux/dynamic_debug.h>
#include <drm/drm.h>
+struct debugfs_regset32;
struct drm_device;
+struct seq_file;
/* Do *not* use outside of drm_print.[ch]! */
extern unsigned long __drm_debug;
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 62741a88796b..d6ce7b218b77 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -16,6 +16,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector
int drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force);
+
+int drmm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
@@ -37,4 +39,8 @@ int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
int drm_connector_helper_get_modes(struct drm_connector *connector);
int drm_connector_helper_tv_get_modes(struct drm_connector *connector);
+int drm_connector_helper_detect_from_ddc(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force);
+
#endif
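
A usage sketch for the new probe helper (hypothetical driver; assumes connector->ddc has been set up so the helper can probe the DDC bus, and that it is assigned to the context-aware detect callback):

static const struct drm_connector_helper_funcs mydrv_connector_helper_funcs = {
	.get_modes = drm_connector_helper_get_modes,
	.detect_ctx = drm_connector_helper_detect_from_ddc,
};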
diff --git a/include/drm/drm_suballoc.h b/include/drm/drm_suballoc.h
index c2188bb0b157..7ba72a81a808 100644
--- a/include/drm/drm_suballoc.h
+++ b/include/drm/drm_suballoc.h
@@ -37,7 +37,7 @@ struct drm_suballoc_manager {
* @manager: The drm_suballoc_manager.
* @soffset: Start offset.
* @eoffset: End offset + 1 so that @eoffset - @soffset = size.
- * @dma_fence: The fence protecting the allocation.
+ * @fence: The fence protecting the allocation.
*/
struct drm_suballoc {
struct list_head olist;
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 7f3957943dd1..c8f829b4307c 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -225,6 +225,7 @@ struct drm_vblank_crtc {
wait_queue_head_t work_wait_queue;
};
+struct drm_vblank_crtc *drm_crtc_vblank_crtc(struct drm_crtc *crtc);
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
bool drm_dev_has_vblank(const struct drm_device *dev);
u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
deleted file mode 100644
index 228f43e8df89..000000000000
--- a/include/drm/gma_drm.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/**************************************************************************
- * Copyright (c) 2007-2011, Intel Corporation.
- * All Rights Reserved.
- * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
- * All Rights Reserved.
- *
- **************************************************************************/
-
-#ifndef _GMA_DRM_H_
-#define _GMA_DRM_H_
-
-#endif
diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
index 8390b437a1f8..5305b9797f93 100644
--- a/include/drm/i2c/ch7006.h
+++ b/include/drm/i2c/ch7006.h
@@ -37,6 +37,7 @@
* meaning.
*/
struct ch7006_encoder_params {
+ /* private: FIXME: document the members */
enum {
CH7006_FORMAT_RGB16 = 0,
CH7006_FORMAT_YCrCb24m16,
diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h
index 205e27384c83..ddf248693c8b 100644
--- a/include/drm/i2c/sil164.h
+++ b/include/drm/i2c/sil164.h
@@ -36,6 +36,7 @@
* See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf".
*/
struct sil164_encoder_params {
+ /* private: FIXME: document the members */
enum {
SIL164_INPUT_EDGE_FALLING = 0,
SIL164_INPUT_EDGE_RISING
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 56a84ee1c64c..4ea3b17aa143 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -24,7 +24,7 @@
#ifndef _I915_COMPONENT_H_
#define _I915_COMPONENT_H_
-#include "drm_audio_component.h"
+#include <drm/drm_audio_component.h>
enum i915_component_type {
I915_COMPONENT_AUDIO = 1,
diff --git a/include/drm/i915_gsc_proxy_mei_interface.h b/include/drm/i915_gsc_proxy_mei_interface.h
index 9462341d3ae1..850dfbf40607 100644
--- a/include/drm/i915_gsc_proxy_mei_interface.h
+++ b/include/drm/i915_gsc_proxy_mei_interface.h
@@ -21,7 +21,7 @@ struct i915_gsc_proxy_component_ops {
struct module *owner;
/**
- * send - Sends a proxy message to ME FW.
+ * @send: Sends a proxy message to ME FW.
* @dev: device struct corresponding to the mei device
* @buf: message buffer to send
* @size: size of the message
@@ -30,7 +30,7 @@ struct i915_gsc_proxy_component_ops {
int (*send)(struct device *dev, const void *buf, size_t size);
/**
- * recv - Receives a proxy message from ME FW.
+ * @recv: Receives a proxy message from ME FW.
* @dev: device struct corresponding to the mei device
* @buf: message buffer to contain the received message
* @size: size of the buffer
diff --git a/include/drm/i915_hdcp_interface.h b/include/drm/i915_hdcp_interface.h
index 4c9c8167c2d5..d776ed7dcd00 100644
--- a/include/drm/i915_hdcp_interface.h
+++ b/include/drm/i915_hdcp_interface.h
@@ -54,7 +54,7 @@ enum hdcp_ddi {
};
/**
- * enum hdcp_tc - ME/GSC Firmware defined index for transcoders
+ * enum hdcp_transcoder - ME/GSC Firmware defined index for transcoders
* @HDCP_INVALID_TRANSCODER: Index for Invalid transcoder
* @HDCP_TRANSCODER_EDP: Index for EDP Transcoder
* @HDCP_TRANSCODER_DSI0: Index for DSI0 Transcoder
@@ -106,7 +106,7 @@ struct hdcp_port_data {
* And Prepare AKE_Init.
* @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
* AKE_Send_Cert and prepare
- AKE_Stored_Km/AKE_No_Stored_Km
+ * AKE_Stored_Km/AKE_No_Stored_Km
* @verify_hprime: Verify AKE_Send_H_prime
* @store_pairing_info: Store pairing info received
* @initiate_locality_check: Prepare LC_Init
@@ -170,14 +170,22 @@ struct i915_hdcp_ops {
/**
* struct i915_hdcp_arbiter - Used for communication between i915
* and hdcp drivers for the HDCP2.2 services
- * @hdcp_dev: device that provide the HDCP2.2 service from MEI Bus.
- * @hdcp_ops: Ops implemented by hdcp driver or intel_hdcp_gsc , used by i915 driver.
*/
struct i915_hdcp_arbiter {
+ /**
+ * @hdcp_dev: device that provides the HDCP2.2 service from MEI Bus.
+ */
struct device *hdcp_dev;
+
+ /**
+ * @ops: Ops implemented by hdcp driver or intel_hdcp_gsc, used by i915
+ * driver.
+ */
const struct i915_hdcp_ops *ops;
- /* To protect the above members. */
+ /**
+ * @mutex: To protect the above members.
+ */
struct mutex mutex;
};
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 28a96aa1e08f..85ce33ad6e26 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -711,7 +711,9 @@
INTEL_VGA_DEVICE(0x5692, info), \
INTEL_VGA_DEVICE(0x56A0, info), \
INTEL_VGA_DEVICE(0x56A1, info), \
- INTEL_VGA_DEVICE(0x56A2, info)
+ INTEL_VGA_DEVICE(0x56A2, info), \
+ INTEL_VGA_DEVICE(0x56BE, info), \
+ INTEL_VGA_DEVICE(0x56BF, info)
#define INTEL_DG2_G11_IDS(info) \
INTEL_VGA_DEVICE(0x5693, info), \
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h
index 7d96985f2d05..a532d32f58f3 100644
--- a/include/drm/i915_pxp_tee_interface.h
+++ b/include/drm/i915_pxp_tee_interface.h
@@ -12,20 +12,26 @@ struct scatterlist;
/**
* struct i915_pxp_component_ops - ops for PXP services.
- * @owner: Module providing the ops
- * @send: sends data to PXP
- * @receive: receives data from PXP
*/
struct i915_pxp_component_ops {
/**
- * @owner: owner of the module provding the ops
+ * @owner: Module providing the ops.
*/
struct module *owner;
+ /**
+ * @send: Send a PXP message.
+ */
int (*send)(struct device *dev, const void *message, size_t size,
unsigned long timeout_ms);
+ /**
+ * @recv: Receive a PXP message.
+ */
int (*recv)(struct device *dev, void *buffer, size_t size,
unsigned long timeout_ms);
+ /**
+ * @gsc_command: Send a GSC command.
+ */
ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id,
struct scatterlist *sg_in, size_t total_in_len,
struct scatterlist *sg_out);
@@ -35,14 +41,21 @@ struct i915_pxp_component_ops {
/**
* struct i915_pxp_component - Used for communication between i915 and TEE
* drivers for the PXP services
- * @tee_dev: device that provide the PXP service from TEE Bus.
- * @pxp_ops: Ops implemented by TEE driver, used by i915 driver.
*/
struct i915_pxp_component {
+ /**
+ * @tee_dev: device that provides the PXP service from TEE Bus.
+ */
struct device *tee_dev;
+
+ /**
+ * @ops: Ops implemented by TEE driver, used by i915 driver.
+ */
const struct i915_pxp_component_ops *ops;
- /* To protect the above members. */
+ /**
+ * @mutex: To protect the above members.
+ */
struct mutex mutex;
};
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 0223a41a64b2..6ccf96c91f3a 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -83,6 +83,9 @@ enum ttm_bo_type {
* @resource: structure describing current placement.
* @ttm: TTM structure holding system pages.
* @deleted: True if the object is only a zombie and already deleted.
+ * @bulk_move: The bulk move object.
+ * @priority: Priority for LRU, BOs with lower priority are evicted first.
+ * @pin_count: Pin count.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -128,26 +131,27 @@ struct ttm_buffer_object {
struct work_struct delayed_delete;
/**
- * Special members that are protected by the reserve lock
- * and the bo::lock when written to. Can be read with
- * either of these locks held.
+ * @sg: external source of pages and DMA addresses, protected by the
+ * reservation lock.
*/
struct sg_table *sg;
};
+#define TTM_BO_MAP_IOMEM_MASK 0x80
+
/**
* struct ttm_bo_kmap_obj
*
* @virtual: The current kernel virtual address.
* @page: The page when kmap'ing a single page.
* @bo_kmap_type: Type of bo_kmap.
+ * @bo: The TTM BO.
*
* Object describing a kernel mapping. Since a TTM bo may be located
* in various memory types with various caching policies, the
* mapping can either be an ioremap, a vmap, a kmap or part of a
* premapped region.
*/
-#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
void *virtual;
struct page *page;
@@ -171,6 +175,7 @@ struct ttm_bo_kmap_obj {
* @force_alloc: Don't check the memory account during suspend or CPU page
* faults. Should only be used by TTM internally.
* @resv: Reservation object to allow reserved evictions with.
+ * @bytes_moved: Statistics on how many bytes have been moved.
*
* Context for TTM operations like changing buffer placement or general memory
* allocation.
@@ -264,7 +269,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
* ttm_bo_reserve_slowpath:
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
- * @sequence: Set (@bo)->sequence to this value after lock
+ * @ticket: Ticket used to acquire the ww_mutex.
*
* This is called after ttm_bo_reserve returns -EAGAIN and we backed off
* from all our other reservations. Because there are no other reservations
@@ -303,7 +308,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
}
/**
- * ttm_bo_move_null = assign memory for a buffer object.
+ * ttm_bo_move_null - assign memory for a buffer object.
* @bo: The bo to assign the memory to
* @new_mem: The memory to be assigned.
*
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
index 235a743d90e1..a18f43e93aba 100644
--- a/include/drm/ttm/ttm_caching.h
+++ b/include/drm/ttm/ttm_caching.h
@@ -25,6 +25,8 @@
#ifndef _TTM_CACHING_H_
#define _TTM_CACHING_H_
+#include <linux/pgtable.h>
+
#define TTM_NUM_CACHING_TYPES 3
/**
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 03aca29d3ce4..fac1e3e57ebd 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -52,7 +52,7 @@ struct ttm_validate_buffer {
};
/**
- * function ttm_eu_backoff_reservation
+ * ttm_eu_backoff_reservation
*
* @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
@@ -64,14 +64,13 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list);
/**
- * function ttm_eu_reserve_buffers
+ * ttm_eu_reserve_buffers
*
* @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
* non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
* @dups: [out] optional list of duplicates.
- * @del_lru: true if BOs should be removed from the LRU.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -102,7 +101,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *dups);
/**
- * function ttm_eu_fence_buffer_objects.
+ * ttm_eu_fence_buffer_objects
*
* @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h
index cc5c09a211b4..fe72631a6e93 100644
--- a/include/drm/ttm/ttm_kmap_iter.h
+++ b/include/drm/ttm/ttm_kmap_iter.h
@@ -20,7 +20,7 @@ struct iosys_map;
*/
struct ttm_kmap_iter_ops {
/**
- * kmap_local() - Map a PAGE_SIZE part of the resource using
+ * @map_local: Map a PAGE_SIZE part of the resource using
* kmap_local semantics.
* @res_iter: Pointer to the struct ttm_kmap_iter representing
* the resource.
@@ -31,7 +31,7 @@ struct ttm_kmap_iter_ops {
void (*map_local)(struct ttm_kmap_iter *res_iter,
struct iosys_map *dmap, pgoff_t i);
/**
- * unmap_local() - Unmap a PAGE_SIZE part of the resource previously
+ * @unmap_local: Unmap a PAGE_SIZE part of the resource previously
* mapped using kmap_local.
* @res_iter: Pointer to the struct ttm_kmap_iter representing
* the resource.
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 4490d43c63e3..160d954a261e 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -32,9 +32,10 @@
#include <drm/ttm/ttm_caching.h>
struct device;
-struct ttm_tt;
-struct ttm_pool;
+struct seq_file;
struct ttm_operation_ctx;
+struct ttm_pool;
+struct ttm_tt;
/**
* struct ttm_pool_type - Pool for a certain memory type
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 1afa13f0c22b..69769355139f 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -251,6 +251,9 @@ struct ttm_lru_bulk_move_pos {
*
* Container for the current bulk move state. Should be used with
* ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
+ * All BOs in a bulk_move structure need to share the same reservation object to
+ * ensure that the bulk as a whole is locked for eviction even if only one BO of
+ * the bulk is evicted.
*/
struct ttm_lru_bulk_move {
struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
@@ -366,7 +369,8 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
const struct ttm_place *place,
size_t size);
bool ttm_resource_compatible(struct ttm_resource *res,
- struct ttm_placement *placement);
+ struct ttm_placement *placement,
+ bool evicting);
void ttm_resource_set_bo(struct ttm_resource *res,
struct ttm_buffer_object *bo);
diff --git a/include/drm/xe_pciids.h b/include/drm/xe_pciids.h
index de1a344737bc..adb37bc541e4 100644
--- a/include/drm/xe_pciids.h
+++ b/include/drm/xe_pciids.h
@@ -134,7 +134,9 @@
MACRO__(0x5692, ## __VA_ARGS__), \
MACRO__(0x56A0, ## __VA_ARGS__), \
MACRO__(0x56A1, ## __VA_ARGS__), \
- MACRO__(0x56A2, ## __VA_ARGS__)
+ MACRO__(0x56A2, ## __VA_ARGS__), \
+ MACRO__(0x56BE, ## __VA_ARGS__), \
+ MACRO__(0x56BF, ## __VA_ARGS__)
#define XE_DG2_G11_IDS(MACRO__, ...) \
MACRO__(0x5693, ## __VA_ARGS__), \
@@ -176,10 +178,13 @@
/* MTL / ARL */
#define XE_MTL_IDS(MACRO__, ...) \
MACRO__(0x7D40, ## __VA_ARGS__), \
+ MACRO__(0x7D41, ## __VA_ARGS__), \
MACRO__(0x7D45, ## __VA_ARGS__), \
+ MACRO__(0x7D51, ## __VA_ARGS__), \
MACRO__(0x7D55, ## __VA_ARGS__), \
MACRO__(0x7D60, ## __VA_ARGS__), \
MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0x7DD1, ## __VA_ARGS__), \
MACRO__(0x7DD5, ## __VA_ARGS__)
#define XE_LNL_IDS(MACRO__, ...) \
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index eb4c369a79eb..35d4ca4f6122 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -86,7 +86,7 @@ void kvm_vcpu_pmu_resync_el0(void);
*/
#define kvm_pmu_update_vcpu_events(vcpu) \
do { \
- if (!has_vhe() && kvm_vcpu_has_pmu(vcpu)) \
+ if (!has_vhe() && kvm_arm_support_pmu_v3()) \
vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
} while (0)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c3e8f7cf96be..69e7da33ca49 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -128,6 +128,8 @@ typedef unsigned int __bitwise blk_mode_t;
#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
+/* return partition scanning errors */
+#define BLK_OPEN_STRICT_SCAN ((__force blk_mode_t)(1 << 6))
struct gendisk {
/*
@@ -1505,16 +1507,6 @@ struct blk_holder_ops {
* Thaw the file system mounted on the block device.
*/
int (*thaw)(struct block_device *bdev);
-
- /*
- * If needed, get a reference to the holder.
- */
- void (*get_holder)(void *holder);
-
- /*
- * Release the holder.
- */
- void (*put_holder)(void *holder);
};
/*
@@ -1585,6 +1577,7 @@ static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
+void bdev_fput(struct file *bdev_file);
struct io_comp_batch {
struct request *req_list;
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index ca73940e26df..3f4b4ac527ca 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -10,6 +10,7 @@
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/types.h>
+bool __init cmdline_has_extra_options(void);
#else /* !__KERNEL__ */
/*
* NOTE: This is only for tools/bootconfig, because tools/bootconfig will
@@ -287,7 +288,12 @@ int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
-void __init xbc_exit(void);
+void __init _xbc_exit(bool early);
+
+static inline void xbc_exit(void)
+{
+ _xbc_exit(false);
+}
/* XBC embedded bootconfig data in kernel */
#ifdef CONFIG_BOOT_CONFIG_EMBED
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4f20f62f9d63..890e152d553e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1574,12 +1574,26 @@ struct bpf_link {
enum bpf_link_type type;
const struct bpf_link_ops *ops;
struct bpf_prog *prog;
- struct work_struct work;
+ /* rcu is used before freeing, work can be used to schedule that
+ * RCU-based freeing before that, so they never overlap
+ */
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
};
struct bpf_link_ops {
void (*release)(struct bpf_link *link);
+ /* deallocate link resources callback, called without RCU grace period
+ * waiting
+ */
void (*dealloc)(struct bpf_link *link);
+ /* deallocate link resources callback, called after RCU grace period;
+ * if underlying BPF program is sleepable we go through tasks trace
+ * RCU GP and then "classic" RCU GP
+ */
+ void (*dealloc_deferred)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);
int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog);
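
A sketch of how a link type might opt into the deferred path (hypothetical ops table and types; a given link implements either @dealloc or @dealloc_deferred, not both):

static void my_link_dealloc(struct bpf_link *link)
{
	/* hypothetical container type embedding struct bpf_link */
	kfree(container_of(link, struct my_link, link));
}

static const struct bpf_link_ops my_link_ops = {
	.release = my_link_release,		/* hypothetical */
	.dealloc_deferred = my_link_dealloc,	/* called only after RCU GP(s) */
};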
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
index cb0d6cd1c12f..60693a145894 100644
--- a/include/linux/cc_platform.h
+++ b/include/linux/cc_platform.h
@@ -90,6 +90,14 @@ enum cc_attr {
* Examples include TDX Guest.
*/
CC_ATTR_HOTPLUG_DISABLED,
+
+ /**
+ * @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host.
+ *
+ * The host kernel is running with the necessary features
+ * enabled to run SEV-SNP guests.
+ */
+ CC_ATTR_HOST_SEV_SNP,
};
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
@@ -107,10 +115,14 @@ enum cc_attr {
* * FALSE - Specified Confidential Computing attribute is not active
*/
bool cc_platform_has(enum cc_attr attr);
+void cc_platform_set(enum cc_attr attr);
+void cc_platform_clear(enum cc_attr attr);
#else /* !CONFIG_ARCH_HAS_CC_PLATFORM */
static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+static inline void cc_platform_set(enum cc_attr attr) { }
+static inline void cc_platform_clear(enum cc_attr attr) { }
#endif /* CONFIG_ARCH_HAS_CC_PLATFORM */
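
A sketch of the intended flow, based on the attribute and setters added above (the init condition and the consumer are hypothetical):

/* Host SNP init path publishes the capability once platform checks pass. */
if (snp_platform_checks_passed)			/* hypothetical */
	cc_platform_set(CC_ATTR_HOST_SEV_SNP);

/* Consumers elsewhere only need the existing query. */
if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
	enable_snp_guest_support();		/* hypothetical */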
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 00623f4de5e1..0fa56d672532 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -286,6 +286,11 @@ static inline int clk_rate_exclusive_get(struct clk *clk)
return 0;
}
+static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
+{
+ return 0;
+}
+
static inline void clk_rate_exclusive_put(struct clk *clk) {}
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c00cc6c0878a..8c252e073bd8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -268,7 +268,7 @@ static inline void *offset_to_ptr(const int *off)
* - When one operand is a null pointer constant (i.e. when x is an integer
* constant expression) and the other is an object pointer (i.e. our
* third operand), the conditional operator returns the type of the
- * object pointer operand (i.e. "int *). Here, within the sizeof(), we
+ * object pointer operand (i.e. "int *"). Here, within the sizeof(), we
* would then get:
* sizeof(*((int *)(...)) == sizeof(int) == 4
* - When one operand is a void pointer (i.e. when x is not an integer
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 272e4e79e15c..861c3bfc5f17 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -221,7 +221,18 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+#ifdef CONFIG_CPU_MITIGATIONS
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
+#else
+static inline bool cpu_mitigations_off(void)
+{
+ return true;
+}
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+ return false;
+}
+#endif
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
index c008169ed2c6..c8f7eb6cc191 100644
--- a/include/linux/devcoredump.h
+++ b/include/linux/devcoredump.h
@@ -63,6 +63,8 @@ void dev_coredumpm(struct device *dev, struct module *owner,
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
size_t datalen, gfp_t gfp);
+
+void dev_coredump_put(struct device *dev);
#else
static inline void dev_coredumpv(struct device *dev, void *data,
size_t datalen, gfp_t gfp)
@@ -85,6 +87,9 @@ static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table,
{
_devcd_free_sgtable(table);
}
+static inline void dev_coredump_put(struct device *dev)
+{
+}
#endif /* CONFIG_DEV_COREDUMP */
#endif /* __DEVCOREDUMP_H */
diff --git a/include/linux/device.h b/include/linux/device.h
index 97c4b046c09d..b9f5464f44ed 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1247,6 +1247,7 @@ void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
+void device_link_wait_removal(void);
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 8ff4add71f88..36216d28d8bd 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -370,8 +370,10 @@ struct dma_buf {
*/
struct module *owner;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
/** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
+#endif
/** @priv: exporter specific private data for this buffer object. */
void *priv;
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index e06bad467f55..c3f9bb6602ba 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -682,4 +682,11 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}
+#define DMA_FENCE_WARN(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
+ ##args); \
+ } while (0)
+
#endif /* __LINUX_DMA_FENCE_H */
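
A minimal usage sketch for the new macro, in a hypothetical driver timeout path:

long ret = dma_fence_wait_timeout(fence, false, msecs_to_jiffies(500));

if (ret <= 0)	/* 0 = timed out, negative = error */
	DMA_FENCE_WARN(fence, "wait failed or timed out (%ld)\n", ret);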
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 770755df852f..70cd7258cd29 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -245,7 +245,6 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* max utilization to the allowed CPU capacity before calculating
* effective performance.
*/
- max_util = map_util_perf(max_util);
max_util = min(max_util, allowed_cpu_cap);
/*
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 224645f17c33..297231854ada 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -608,6 +608,31 @@ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
}
/**
+ * eth_skb_pkt_type - Assign packet type if destination address does not match
+ * @skb: Assigned a packet type if address does not match @dev address
+ * @dev: Network device used to compare packet address against
+ *
+ * If the destination MAC address of the packet does not match the network
+ * device address, assign an appropriate packet type.
+ */
+static inline void eth_skb_pkt_type(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
+ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else {
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ }
+}
+
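+/*
+ * A sketch of a caller, e.g. a tunnel decapsulation path that previously
+ * open-coded these checks (function name hypothetical):
+ *
+ *	static void mytun_classify(struct sk_buff *skb, struct net_device *dev)
+ *	{
+ *		skb->pkt_type = PACKET_HOST;	// default: addressed to this host
+ *		skb_reset_mac_header(skb);	// eth_hdr() must be valid
+ *		eth_skb_pkt_type(skb, dev);	// overrides for bcast/mcast/other
+ *	}
+ */
+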
+/**
* eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 0dd27364d56f..811e47f9d1c3 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -694,6 +694,10 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
__FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
__FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+#define FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
/*
* Initializes struct fb_ops for deferred I/O.
*/
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
index 5c28298a98be..366243ee9609 100644
--- a/include/linux/firmware/qcom/qcom_qseecom.h
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -10,6 +10,7 @@
#define __QCOM_QSEECOM_H
#include <linux/auxiliary_bus.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
@@ -25,11 +26,56 @@ struct qseecom_client {
};
/**
+ * qseecom_scm_dev() - Get the SCM device associated with the QSEECOM client.
+ * @client: The QSEECOM client device.
+ *
+ * Returns the SCM device under which the provided QSEECOM client device
+ * operates. This function is intended to be used for DMA allocations.
+ */
+static inline struct device *qseecom_scm_dev(struct qseecom_client *client)
+{
+ return client->aux_dev.dev.parent->parent;
+}
+
+/**
+ * qseecom_dma_alloc() - Allocate DMA memory for a QSEECOM client.
+ * @client: The QSEECOM client to allocate the memory for.
+ * @size: The number of bytes to allocate.
+ * @dma_handle: Pointer to where the DMA address should be stored.
+ * @gfp: Allocation flags.
+ *
+ * Wrapper function for dma_alloc_coherent(), allocating DMA memory usable for
+ * TZ/QSEECOM communication. Refer to dma_alloc_coherent() for details.
+ */
+static inline void *qseecom_dma_alloc(struct qseecom_client *client, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ return dma_alloc_coherent(qseecom_scm_dev(client), size, dma_handle, gfp);
+}
+
+/**
+ * qseecom_dma_free() - Free QSEECOM DMA memory.
+ * @client: The QSEECOM client for which the memory has been allocated.
+ * @size: The number of bytes allocated.
+ * @cpu_addr: Virtual memory address to free.
+ * @dma_handle: DMA memory address to free.
+ *
+ * Wrapper function for dma_free_coherent(), freeing memory previously
+ * allocated with qseecom_dma_alloc(). Refer to dma_free_coherent() for
+ * details.
+ */
+static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_coherent(qseecom_scm_dev(client), size, cpu_addr, dma_handle);
+}
+
+/**
* qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @client: The QSEECOM client associated with the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given client and reads
@@ -43,8 +89,9 @@ struct qseecom_client {
*
* Return: Zero on success, nonzero on failure.
*/
-static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size,
- void *rsp, size_t rsp_size)
+static inline int qcom_qseecom_app_send(struct qseecom_client *client,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
}
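
A sketch of the new calling convention end to end (buffer size and request layout are hypothetical; error handling trimmed to the essentials):

static int my_app_call(struct qseecom_client *client)
{
	dma_addr_t req_dma, rsp_dma;
	void *req, *rsp;
	int ret;

	req = qseecom_dma_alloc(client, SZ_4K, &req_dma, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	rsp = qseecom_dma_alloc(client, SZ_4K, &rsp_dma, GFP_KERNEL);
	if (!rsp) {
		ret = -ENOMEM;
		goto free_req;
	}

	/* ... fill *req with the app-specific request ... */
	ret = qcom_qseecom_app_send(client, req_dma, SZ_4K, rsp_dma, SZ_4K);

	qseecom_dma_free(client, SZ_4K, rsp, rsp_dma);
free_req:
	qseecom_dma_free(client, SZ_4K, req, req_dma);
	return ret;
}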
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index ccaf28846054..aaa19f93ac43 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -118,8 +118,8 @@ bool qcom_scm_lmh_dcvsh_available(void);
#ifdef CONFIG_QCOM_QSEECOM
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size);
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size);
#else /* CONFIG_QCOM_QSEECOM */
@@ -128,9 +128,9 @@ static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
return -EINVAL;
}
-static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req,
- size_t req_size, void *rsp,
- size_t rsp_size)
+static inline int qcom_scm_qseecom_app_send(u32 app_id,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return -EINVAL;
}
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h
index 9a9b88962c29..2b85fe9e7f9a 100644
--- a/include/linux/framer/framer.h
+++ b/include/linux/framer/framer.h
@@ -181,12 +181,12 @@ static inline int framer_notifier_unregister(struct framer *framer,
return -ENOSYS;
}
-struct framer *framer_get(struct device *dev, const char *con_id)
+static inline struct framer *framer_get(struct device *dev, const char *con_id)
{
return ERR_PTR(-ENOSYS);
}
-void framer_put(struct device *dev, struct framer *framer)
+static inline void framer_put(struct device *dev, struct framer *framer)
{
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 00fc429b0af0..8dfd53b52744 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -121,6 +121,8 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_PWRITE ((__force fmode_t)0x10)
/* File is opened for execution with sys_execve / sys_uselib */
#define FMODE_EXEC ((__force fmode_t)0x20)
+/* File writes are restricted (block device specific) */
+#define FMODE_WRITE_RESTRICTED ((__force fmode_t)0x40)
/* 32bit hashes as llseek() offset (for directories) */
#define FMODE_32BITHASH ((__force fmode_t)0x200)
/* 64bit hashes as llseek() offset (for directories) */
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 868c8fb1bbc1..13becafe41df 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -2,6 +2,8 @@
#ifndef __LINUX_GFP_TYPES_H
#define __LINUX_GFP_TYPES_H
+#include <linux/bits.h>
+
/* The typedef is in types.h but we want the documentation here */
#if 0
/**
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index dc75f802e284..f8617eaf08ba 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -646,8 +646,6 @@ int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
struct gpio_device *gpio_device_find(const void *data,
int (*match)(struct gpio_chip *gc,
const void *data));
-struct gpio_device *gpio_device_find_by_label(const char *label);
-struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode);
struct gpio_device *gpio_device_get(struct gpio_device *gdev);
void gpio_device_put(struct gpio_device *gdev);
@@ -814,6 +812,9 @@ struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc);
int gpio_device_get_base(struct gpio_device *gdev);
const char *gpio_device_get_label(struct gpio_device *gdev);
+struct gpio_device *gpio_device_find_by_label(const char *label);
+struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode);
+
#else /* CONFIG_GPIOLIB */
#include <asm/bug.h>
@@ -843,6 +844,18 @@ static inline const char *gpio_device_get_label(struct gpio_device *gdev)
return NULL;
}
+static inline struct gpio_device *gpio_device_find_by_label(const char *label)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h
index 6c75c8bd44a0..1a14e239221f 100644
--- a/include/linux/gpio/property.h
+++ b/include/linux/gpio/property.h
@@ -2,7 +2,6 @@
#ifndef __LINUX_GPIO_PROPERTY_H
#define __LINUX_GPIO_PROPERTY_H
-#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */
#include <linux/property.h>
#define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6ef0557b4bff..96ceb4095425 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -832,6 +832,7 @@ struct vmbus_gpadl {
u32 gpadl_handle;
u32 size;
void *buffer;
+ bool decrypted;
};
struct vmbus_channel {
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 76121c2bb4f8..5c9bdd3ffccc 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -67,6 +67,8 @@
* later.
* IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
* depends on IRQF_PERCPU.
+ * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared
+ * interrupt.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -82,6 +84,7 @@
#define IRQF_COND_SUSPEND 0x00040000
#define IRQF_NO_AUTOEN 0x00080000
#define IRQF_NO_DEBUG 0x00100000
+#define IRQF_COND_ONESHOT 0x00200000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index e24893625085..ac333ea81d31 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -13,7 +13,7 @@ enum {
* A hint to not wake right away but delay until there are enough of
* tw's queued to match the number of CQEs the task is waiting for.
*
- * Must not be used wirh requests generating more than one CQE.
+ * Must not be used with requests generating more than one CQE.
* It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
*/
IOU_F_TWQ_LAZY_WAKE = 1,
@@ -294,7 +294,6 @@ struct io_ring_ctx {
struct io_submit_state submit_state;
- struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
struct io_hash_table cancel_table_locked;
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 147feebd508c..3f003d5fde53 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -114,7 +114,7 @@ do { \
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer) false
-# define lockdep_hrtimer_exit(__context) do { } while (0)
+# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
# define lockdep_posixtimer_enter() do { } while (0)
# define lockdep_posixtimer_exit() do { } while (0)
# define lockdep_irq_work_enter(__work) do { } while (0)
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 26d68115afb8..324d792e7c78 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -107,6 +107,7 @@ enum {
ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 20), /* Priority cmds sent to dev */
ATA_DFLAG_CDL_ENABLED = (1 << 21), /* cmd duration limits is enabled */
+ ATA_DFLAG_RESUMING = (1 << 22), /* Device is resuming */
ATA_DFLAG_DETACH = (1 << 24),
ATA_DFLAG_DETACHED = (1 << 25),
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0436b919f1c7..b6bdaa18b9e9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1223,14 +1223,16 @@ static inline void page_mapcount_reset(struct page *page)
* a large folio, it includes the number of times this page is mapped
* as part of that folio.
*
- * The result is undefined for pages which cannot be mapped into userspace.
- * For example SLAB or special types of pages. See function page_has_type().
- * They use this field in struct page differently.
+ * Will report 0 for pages which cannot be mapped into userspace, e.g.
+ * slab, page tables and similar.
*/
static inline int page_mapcount(struct page *page)
{
int mapcount = atomic_read(&page->_mapcount) + 1;
+ /* Handle page_has_type() pages */
+ if (mapcount < 0)
+ mapcount = 0;
if (unlikely(PageCompound(page)))
mapcount += folio_entire_mapcount(page_folio(page));
@@ -2207,11 +2209,6 @@ static inline int arch_make_folio_accessible(struct folio *folio)
*/
#include <linux/vmstat.h>
-static __always_inline void *lowmem_page_address(const struct page *page)
-{
- return page_to_virt(page);
-}
-
#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif
@@ -2234,6 +2231,11 @@ void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif
+static __always_inline void *lowmem_page_address(const struct page *page)
+{
+ return page_to_virt(page);
+}
+
#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
diff --git a/include/linux/mman.h b/include/linux/mman.h
index dc7048824be8..bcb201ab7a41 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -162,6 +162,14 @@ calc_vm_flag_bits(unsigned long flags)
unsigned long vm_commit_limit(void);
+#ifndef arch_memory_deny_write_exec_supported
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return true;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+#endif
+
/*
* Denies creating a writable executable mapping or gaining executable permissions.
*
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 3921fbed0b28..51421fdbb0ba 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -17,10 +17,12 @@
* build_OID_registry.pl to generate the data for look_up_OID().
*/
enum OID {
+ OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */
OID_id_dsa, /* 1.2.840.10040.4.1 */
OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */
OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */
+ OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */
OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */
OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */
@@ -28,6 +30,7 @@ enum OID {
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
+ OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */
OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */
OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */
OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */
@@ -64,6 +67,7 @@ enum OID {
OID_PKU2U, /* 1.3.5.1.5.2.7 */
OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
+ OID_sha1, /* 1.3.14.3.2.26 */
OID_id_ansip384r1, /* 1.3.132.0.34 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 652d77805e99..4bf1c25fd1dc 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -190,7 +190,6 @@ enum pageflags {
/* At least one page in this folio has the hwpoison flag set */
PG_has_hwpoisoned = PG_error,
- PG_hugetlb = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
};
@@ -458,30 +457,51 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
+#define FOLIO_TEST_FLAG_FALSE(name) \
+static inline bool folio_test_##name(const struct folio *folio) \
+{ return false; }
+#define FOLIO_SET_FLAG_NOOP(name) \
+static inline void folio_set_##name(struct folio *folio) { }
+#define FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void folio_clear_##name(struct folio *folio) { }
+#define __FOLIO_SET_FLAG_NOOP(name) \
+static inline void __folio_set_##name(struct folio *folio) { }
+#define __FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void __folio_clear_##name(struct folio *folio) { }
+#define FOLIO_TEST_SET_FLAG_FALSE(name) \
+static inline bool folio_test_set_##name(struct folio *folio) \
+{ return false; }
+#define FOLIO_TEST_CLEAR_FLAG_FALSE(name) \
+static inline bool folio_test_clear_##name(struct folio *folio) \
+{ return false; }
+
+#define FOLIO_FLAG_FALSE(name) \
+FOLIO_TEST_FLAG_FALSE(name) \
+FOLIO_SET_FLAG_NOOP(name) \
+FOLIO_CLEAR_FLAG_NOOP(name)
+
#define TESTPAGEFLAG_FALSE(uname, lname) \
-static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
+FOLIO_TEST_FLAG_FALSE(lname) \
static inline int Page##uname(const struct page *page) { return 0; }
#define SETPAGEFLAG_NOOP(uname, lname) \
-static inline void folio_set_##lname(struct folio *folio) { } \
+FOLIO_SET_FLAG_NOOP(lname) \
static inline void SetPage##uname(struct page *page) { }
#define CLEARPAGEFLAG_NOOP(uname, lname) \
-static inline void folio_clear_##lname(struct folio *folio) { } \
+FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void ClearPage##uname(struct page *page) { }
#define __CLEARPAGEFLAG_NOOP(uname, lname) \
-static inline void __folio_clear_##lname(struct folio *folio) { } \
+__FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void __ClearPage##uname(struct page *page) { }
#define TESTSETFLAG_FALSE(uname, lname) \
-static inline bool folio_test_set_##lname(struct folio *folio) \
-{ return 0; } \
+FOLIO_TEST_SET_FLAG_FALSE(lname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }
#define TESTCLEARFLAG_FALSE(uname, lname) \
-static inline bool folio_test_clear_##lname(struct folio *folio) \
-{ return 0; } \
+FOLIO_TEST_CLEAR_FLAG_FALSE(lname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }
#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
@@ -855,29 +875,6 @@ TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#define PG_head_mask ((1UL << PG_head))
-#ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(const struct page *page);
-SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-
-/**
- * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
- * @folio: The folio to test.
- *
- * Context: Any context. Caller should have a reference on the folio to
- * prevent it from being turned into a tail page.
- * Return: True for hugetlbfs folios, false for anon folios or folios
- * belonging to other filesystems.
- */
-static inline bool folio_test_hugetlb(const struct folio *folio)
-{
- return folio_test_large(folio) &&
- test_bit(PG_hugetlb, const_folio_flags(folio, 1));
-}
-#else
-TESTPAGEFLAG_FALSE(Huge, hugetlb)
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -934,33 +931,22 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif
/*
- * Check if a page is currently marked HWPoisoned. Note that this check is
- * best effort only and inherently racy: there is no way to synchronize with
- * failing hardware.
- */
-static inline bool is_page_hwpoison(struct page *page)
-{
- if (PageHWPoison(page))
- return true;
- return PageHuge(page) && PageHWPoison(compound_head(page));
-}
-
-/*
* For pages that are never mapped to userspace (and aren't PageSlab),
* page_type may be used. Because it is initialised to -1, we invert the
* sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
* __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of page_mapcount() won't be
+ * low bits so that an underflow or overflow of _mapcount won't be
* mistaken for a page type value.
*/
#define PAGE_TYPE_BASE 0xf0000000
-/* Reserve 0x0000007f to catch underflows of page_mapcount */
+/* Reserve 0x0000007f to catch underflows of _mapcount */
#define PAGE_MAPCOUNT_RESERVE -128
#define PG_buddy 0x00000080
#define PG_offline 0x00000100
#define PG_table 0x00000200
#define PG_guard 0x00000400
+#define PG_hugetlb 0x00000800
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -977,35 +963,38 @@ static inline int page_has_type(const struct page *page)
return page_type_has_type(page->page_type);
}
+#define FOLIO_TYPE_OPS(lname, fname) \
+static __always_inline bool folio_test_##fname(const struct folio *folio)\
+{ \
+ return folio_test_type(folio, PG_##lname); \
+} \
+static __always_inline void __folio_set_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
+ folio->page.page_type &= ~PG_##lname; \
+} \
+static __always_inline void __folio_clear_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
+ folio->page.page_type |= PG_##lname; \
+}
+
#define PAGE_TYPE_OPS(uname, lname, fname) \
+FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
return PageType(page, PG_##lname); \
} \
-static __always_inline int folio_test_##fname(const struct folio *folio)\
-{ \
- return folio_test_type(folio, PG_##lname); \
-} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!PageType(page, 0), page); \
page->page_type &= ~PG_##lname; \
} \
-static __always_inline void __folio_set_##fname(struct folio *folio) \
-{ \
- VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
- folio->page.page_type &= ~PG_##lname; \
-} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
page->page_type |= PG_##lname; \
-} \
-static __always_inline void __folio_clear_##fname(struct folio *folio) \
-{ \
- VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
- folio->page.page_type |= PG_##lname; \
-} \
+}
/*
* PageBuddy() indicates that the page is free and in the buddy system
@@ -1052,6 +1041,37 @@ PAGE_TYPE_OPS(Table, table, pgtable)
*/
PAGE_TYPE_OPS(Guard, guard, guard)
+#ifdef CONFIG_HUGETLB_PAGE
+FOLIO_TYPE_OPS(hugetlb, hugetlb)
+#else
+FOLIO_TEST_FLAG_FALSE(hugetlb)
+#endif
+
+/**
+ * PageHuge - Determine if the page belongs to hugetlbfs
+ * @page: The page to test.
+ *
+ * Context: Any context.
+ * Return: True for hugetlbfs pages, false for anon pages or pages
+ * belonging to other filesystems.
+ */
+static inline bool PageHuge(const struct page *page)
+{
+ return folio_test_hugetlb(page_folio(page));
+}
+
+/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(struct page *page)
+{
+ if (PageHWPoison(page))
+ return true;
+ return PageHuge(page) && PageHWPoison(compound_head(page));
+}
+
extern bool is_free_buddy_page(struct page *page);
PAGEFLAG(Isolated, isolated, PF_ANY);
@@ -1118,7 +1138,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
- 1UL << PG_hugetlb | 1UL << PG_large_rmappable)
+ 1UL << PG_large_rmappable)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
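
A compilable userspace model of the inverted page_type encoding described above: the field starts at -1 (all bits set), so "setting" a type clears its bit and "clearing" sets it, which is exactly what FOLIO_TYPE_OPS() expands to:

#include <assert.h>
#include <stdio.h>

#define PAGE_TYPE_BASE 0xf0000000u
#define PG_hugetlb     0x00000800u

static unsigned int page_type = 0xffffffffu;    /* freshly initialised page */

static int test_type(unsigned int flag)
{
        return (page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE;
}

int main(void)
{
        assert(!test_type(PG_hugetlb));
        page_type &= ~PG_hugetlb;       /* __folio_set_hugetlb() */
        assert(test_type(PG_hugetlb));
        page_type |= PG_hugetlb;        /* __folio_clear_hugetlb() */
        assert(!test_type(PG_hugetlb));
        printf("inverted page_type ops behave as expected\n");
        return 0;
}
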
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index fcc06c300a72..5d3a0cccc6bf 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -11,8 +11,8 @@
#include <linux/types.h>
-/* 15 pointers + header align the folio_batch structure to a power of two */
-#define PAGEVEC_SIZE 15
+/* 31 pointers + header align the folio_batch structure to a power of two */
+#define PAGEVEC_SIZE 31
struct folio;
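
The arithmetic behind the new comment, on a 64-bit build: a pointer-aligned header rounds up to 8 bytes, and 8 + 31 * 8 = 256, a power of two (it was 8 + 15 * 8 = 128 before). A rough stand-in for the real struct folio_batch:

#include <stdbool.h>
#include <stdio.h>

#define PAGEVEC_SIZE 31

struct folio_batch_like {       /* approximation of struct folio_batch */
        unsigned char nr;
        bool percpu_pvec_drained;
        void *folios[PAGEVEC_SIZE];
};

int main(void)
{
        size_t sz = sizeof(struct folio_batch_like);

        printf("sizeof = %zu, power of two: %s\n",
               sz, (sz & (sz - 1)) == 0 ? "yes" : "no");
        return 0;
}
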
diff --git a/include/linux/peci.h b/include/linux/peci.h
index 9b3d36aff431..90e241458ef6 100644
--- a/include/linux/peci.h
+++ b/include/linux/peci.h
@@ -58,7 +58,6 @@ static inline struct peci_controller *to_peci_controller(void *d)
/**
* struct peci_device - PECI device
* @dev: device object to register PECI device to the device model
- * @controller: manages the bus segment hosting this PECI device
* @info: PECI device characteristics
* @info.family: device family
* @info.model: device model
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 11db1ec516e2..04ae5ebcb637 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -18,13 +18,8 @@ struct proc_dir_entry;
struct notifier_block;
#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
-void create_prof_cpu_mask(void);
int create_proc_profile(void);
#else
-static inline void create_prof_cpu_mask(void)
-{
-}
-
static inline int create_proc_profile(void)
{
return 0;
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 5d868505a94e..6d92b68efbf6 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -80,7 +80,7 @@ DECLARE_PER_CPU(u32, kstack_offset);
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
u32 offset = raw_cpu_read(kstack_offset); \
- offset ^= (rand); \
+ offset = ror32(offset, 5) ^ (rand); \
raw_cpu_write(kstack_offset, offset); \
} \
} while (0)
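
Why rotate before XOR: feeding the same low-entropy sample in twice cancels out under plain XOR, while rotate-then-XOR keeps spreading bits across the word. A small userspace demonstration:

#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t word, unsigned int shift)
{
        return (word >> shift) | (word << (32 - shift));
}

int main(void)
{
        uint32_t plain = 0, mixed = 0;

        /* Same sample every round: plain XOR just toggles two bits,
         * the rotated variant accumulates state instead. */
        for (int i = 0; i < 6; i++) {
                plain ^= 0x3u;
                mixed = ror32(mixed, 5) ^ 0x3u;
                printf("round %d: plain=%08x mixed=%08x\n", i, plain, mixed);
        }
        return 0;
}
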
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
index 29c4e4f243e4..f2394a409c9d 100644
--- a/include/linux/rwbase_rt.h
+++ b/include/linux/rwbase_rt.h
@@ -31,9 +31,9 @@ static __always_inline bool rw_base_is_locked(const struct rwbase_rt *rwb)
return atomic_read(&rwb->readers) != READER_BIAS;
}
-static inline void rw_base_assert_held_write(const struct rwbase_rt *rwb)
+static __always_inline bool rw_base_is_write_locked(const struct rwbase_rt *rwb)
{
- WARN_ON(atomic_read(&rwb->readers) != WRITER_BIAS);
+ return atomic_read(&rwb->readers) == WRITER_BIAS;
}
static __always_inline bool rw_base_is_contended(const struct rwbase_rt *rwb)
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 4f1c18992f76..c8b543d428b0 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -167,14 +167,14 @@ static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
return rw_base_is_locked(&sem->rwbase);
}
-static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
WARN_ON(!rwsem_is_locked(sem));
}
-static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
- rw_base_assert_held_write(sem);
+ WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}
static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
index 35f3a4a8ceb1..acf7e1a3f3de 100644
--- a/include/linux/secretmem.h
+++ b/include/linux/secretmem.h
@@ -13,10 +13,10 @@ static inline bool folio_is_secretmem(struct folio *folio)
/*
* Using folio_mapping() is quite slow because of the actual call
* instruction.
- * We know that secretmem pages are not compound and LRU so we can
+ * We know that secretmem pages are not compound, so we can
* save a couple of cycles here.
*/
- if (folio_test_large(folio) || !folio_test_lru(folio))
+ if (folio_test_large(folio))
return false;
mapping = (struct address_space *)
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a4c15db2f5e5..3fb18f7eb73e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -110,8 +110,17 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
struct mm_struct *mm, unsigned long vm_flags);
+#else
+static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ struct mm_struct *mm, unsigned long vm_flags)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
#else
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0c7c67b3a87b..9d24aec064e8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -753,8 +753,6 @@ typedef unsigned char *sk_buff_data_t;
* @list: queue head
* @ll_node: anchor in an llist (eg socket defer_list)
* @sk: Socket we are owned by
- * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
- * fragmentation management
* @dev: Device we arrived on/are leaving by
* @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
@@ -875,10 +873,7 @@ struct sk_buff {
struct llist_node ll_node;
};
- union {
- struct sock *sk;
- int ip_defrag_offset;
- };
+ struct sock *sk;
union {
ktime_t tstamp;
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index 307961b41541..317200cd3a60 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -50,11 +50,36 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src,
return 0;
}
+/* Deprecated.
+ * This is unsafe unless the caller has checked the user-provided optlen.
+ * Prefer copy_safe_from_sockptr() instead.
+ */
static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
{
return copy_from_sockptr_offset(dst, src, 0, size);
}
+/**
+ * copy_safe_from_sockptr - copy a struct from sockptr
+ * @dst: Destination address, in kernel space. This buffer must be @ksize
+ * bytes long.
+ * @ksize: Size of @dst struct.
+ * @optval: Source address. (in user or kernel space)
+ * @optlen: Size of @optval data.
+ *
+ * Returns:
+ * * -EINVAL: @optlen < @ksize
+ * * -EFAULT: access to userspace failed.
+ * * 0 : @ksize bytes were copied
+ */
+static inline int copy_safe_from_sockptr(void *dst, size_t ksize,
+ sockptr_t optval, unsigned int optlen)
+{
+ if (optlen < ksize)
+ return -EINVAL;
+ return copy_from_sockptr(dst, optval, ksize);
+}
+
static inline int copy_struct_from_sockptr(void *dst, size_t ksize,
sockptr_t src, size_t usize)
{
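
A hedged sketch of the intended call pattern; the option struct and handler below are hypothetical, not part of the header:

struct my_opt {                 /* hypothetical option payload */
        __u32 flags;
        __u32 timeout_ms;
};

static int my_setsockopt(struct sock *sk, sockptr_t optval,
                         unsigned int optlen)
{
        struct my_opt opt;
        int err;

        /* Returns -EINVAL when userspace passed fewer than sizeof(opt)
         * bytes, instead of silently reading past the user buffer. */
        err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
        if (err)
                return err;

        /* ... apply opt to sk ... */
        return 0;
}
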
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 3c6caa5abc7c..e9ec32fb97d4 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -44,10 +44,9 @@ typedef u32 depot_stack_handle_t;
union handle_parts {
depot_stack_handle_t handle;
struct {
- /* pool_index is offset by 1 */
- u32 pool_index : DEPOT_POOL_INDEX_BITS;
- u32 offset : DEPOT_OFFSET_BITS;
- u32 extra : STACK_DEPOT_EXTRA_BITS;
+ u32 pool_index_plus_1 : DEPOT_POOL_INDEX_BITS;
+ u32 offset : DEPOT_OFFSET_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 24cd199dd6f3..d33bab33099a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -210,7 +210,6 @@ struct svc_rdma_recv_ctxt {
*/
struct svc_rdma_write_info {
struct svcxprt_rdma *wi_rdma;
- struct list_head wi_list;
const struct svc_rdma_chunk *wi_chunk;
@@ -239,10 +238,7 @@ struct svc_rdma_send_ctxt {
struct ib_cqe sc_cqe;
struct xdr_buf sc_hdrbuf;
struct xdr_stream sc_stream;
-
- struct list_head sc_write_info_list;
struct svc_rdma_write_info sc_reply_info;
-
void *sc_xprt_buf;
int sc_page_count;
int sc_cur_sge_no;
@@ -274,14 +270,11 @@ extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
struct svc_rdma_chunk_ctxt *cc,
enum dma_data_direction dir);
-extern void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt);
extern void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
-extern int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma,
- const struct svc_rdma_pcl *write_pcl,
- struct svc_rdma_send_ctxt *sctxt,
- const struct xdr_buf *xdr);
+extern int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ const struct xdr_buf *xdr);
extern int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma,
const struct svc_rdma_pcl *write_pcl,
const struct svc_rdma_pcl *reply_pcl,
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 48b700ba1d18..a5c560a2f8c2 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -390,6 +390,35 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
}
#endif /* CONFIG_MIGRATION */
+#ifdef CONFIG_MEMORY_FAILURE
+
+/*
+ * Support for hardware poisoned pages
+ */
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+ return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+}
+
+static inline int is_hwpoison_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_HWPOISON;
+}
+
+#else
+
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ return swp_entry(0, 0);
+}
+
+static inline int is_hwpoison_entry(swp_entry_t swp)
+{
+ return 0;
+}
+#endif
+
typedef unsigned long pte_marker;
#define PTE_MARKER_UFFD_WP BIT(0)
@@ -483,8 +512,9 @@ static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
/*
* A pfn swap entry is a special type of swap entry that always has a pfn stored
- * in the swap offset. They are used to represent unaddressable device memory
- * and to restrict access to a page undergoing migration.
+ * in the swap offset. They can either be used to represent unaddressable device
+ * memory, to restrict access to a page undergoing migration or to represent a
+ * pfn which has been hwpoisoned and unmapped.
*/
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
@@ -492,7 +522,7 @@ static inline bool is_pfn_swap_entry(swp_entry_t entry)
BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
return is_migration_entry(entry) || is_device_private_entry(entry) ||
- is_device_exclusive_entry(entry);
+ is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
}
struct page_vma_mapped_walk;
@@ -561,35 +591,6 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-#ifdef CONFIG_MEMORY_FAILURE
-
-/*
- * Support for hardware poisoned pages
- */
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
- BUG_ON(!PageLocked(page));
- return swp_entry(SWP_HWPOISON, page_to_pfn(page));
-}
-
-static inline int is_hwpoison_entry(swp_entry_t entry)
-{
- return swp_type(entry) == SWP_HWPOISON;
-}
-
-#else
-
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
- return swp_entry(0, 0);
-}
-
-static inline int is_hwpoison_entry(swp_entry_t swp)
-{
- return 0;
-}
-#endif
-
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index c6540ceea143..0982d1d52b24 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -22,7 +22,7 @@
*
* @read: returns the current cycle value
* @mask: bitmask for two's complement
- * subtraction of non 64 bit counters,
+ * subtraction of non-64-bit counters,
* see CYCLECOUNTER_MASK() helper macro
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
@@ -35,7 +35,7 @@ struct cyclecounter {
};
/**
- * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ * struct timecounter - layer above a &struct cyclecounter which counts nanoseconds
* Contains the state needed by timecounter_read() to detect
* cycle counter wrap around. Initialize with
* timecounter_init(). Also used to convert cycle counts into the
@@ -66,6 +66,8 @@ struct timecounter {
* @cycles: Cycles
* @mask: bit mask for maintaining the 'frac' field
* @frac: pointer to storage for the fractional nanoseconds.
+ *
+ * Returns: cycle counter cycles converted to nanoseconds
*/
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
u64 cycles, u64 mask, u64 *frac)
@@ -79,6 +81,7 @@ static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
/**
* timecounter_adjtime - Shifts the time of the clock.
+ * @tc: The &struct timecounter to adjust
* @delta: Desired change in nanoseconds.
*/
static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
@@ -107,6 +110,8 @@ extern void timecounter_init(struct timecounter *tc,
*
* In other words, keeps track of time since the same epoch as
* the function which generated the initial time stamp.
+ *
+ * Returns: nanoseconds since the initial time stamp
*/
extern u64 timecounter_read(struct timecounter *tc);
@@ -123,6 +128,8 @@ extern u64 timecounter_read(struct timecounter *tc);
*
* This allows conversion of cycle counter values which were generated
* in the past.
+ *
+ * Returns: cycle counter converted to nanoseconds since the initial time stamp
*/
extern u64 timecounter_cyc2time(const struct timecounter *tc,
u64 cycle_tstamp);
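
A worked example of the mult/shift conversion the cyclecounter documentation refers to, for a hypothetical 24 MHz counter (the frac remainder handling of cyclecounter_cyc2ns() is omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* mult = round(1e9 / 24e6 * 2^24): ~41.667 ns per cycle, scaled */
        const uint64_t mult = 699050667;
        const unsigned int shift = 24;
        uint64_t cycles = 24000000;     /* one second worth of ticks */

        uint64_t ns = (cycles * mult) >> shift; /* ~1000000000 ns */
        printf("%llu cycles -> %llu ns\n",
               (unsigned long long)cycles, (unsigned long long)ns);
        return 0;
}
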
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 7e50cbd97f86..0ea7823b7f31 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -22,14 +22,14 @@ extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
/*
- * ktime_get() family: read the current time in a multitude of ways,
+ * ktime_get() family - read the current time in a multitude of ways.
*
* The default time reference is CLOCK_MONOTONIC, starting at
* boot time but not counting the time spent in suspend.
* For other references, use the functions with "real", "clocktai",
* "boottime" and "raw" suffixes.
*
- * To get the time in a different format, use the ones wit
+ * To get the time in a different format, use the ones with
* "ns", "ts64" and "seconds" suffix.
*
* See Documentation/core-api/timekeeping.rst for more details.
@@ -74,6 +74,8 @@ extern u32 ktime_get_resolution_ns(void);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
+ *
+ * Returns: real (wall) time in ktime_t format
*/
static inline ktime_t ktime_get_real(void)
{
@@ -86,10 +88,12 @@ static inline ktime_t ktime_get_coarse_real(void)
}
/**
- * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
+ * ktime_get_boottime - Get monotonic time since boot in ktime_t format
*
* This is similar to CLOCK_MONTONIC/ktime_get, but also includes the
* time spent in suspend.
+ *
+ * Returns: monotonic time since boot in ktime_t format
*/
static inline ktime_t ktime_get_boottime(void)
{
@@ -102,7 +106,9 @@ static inline ktime_t ktime_get_coarse_boottime(void)
}
/**
- * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
+ * ktime_get_clocktai - Get the TAI time of day in ktime_t format
+ *
+ * Returns: the TAI time of day in ktime_t format
*/
static inline ktime_t ktime_get_clocktai(void)
{
@@ -144,32 +150,60 @@ static inline u64 ktime_get_coarse_clocktai_ns(void)
/**
* ktime_mono_to_real - Convert monotonic time to clock realtime
+ * @mono: monotonic time to convert
+ *
+ * Returns: time converted to realtime clock
*/
static inline ktime_t ktime_mono_to_real(ktime_t mono)
{
return ktime_mono_to_any(mono, TK_OFFS_REAL);
}
+/**
+ * ktime_get_ns - Get the current time in nanoseconds
+ *
+ * Returns: current time converted to nanoseconds
+ */
static inline u64 ktime_get_ns(void)
{
return ktime_to_ns(ktime_get());
}
+/**
+ * ktime_get_real_ns - Get the current real/wall time in nanoseconds
+ *
+ * Returns: current real time converted to nanoseconds
+ */
static inline u64 ktime_get_real_ns(void)
{
return ktime_to_ns(ktime_get_real());
}
+/**
+ * ktime_get_boottime_ns - Get the monotonic time since boot in nanoseconds
+ *
+ * Returns: current boottime converted to nanoseconds
+ */
static inline u64 ktime_get_boottime_ns(void)
{
return ktime_to_ns(ktime_get_boottime());
}
+/**
+ * ktime_get_clocktai_ns - Get the current TAI time of day in nanoseconds
+ *
+ * Returns: current TAI time converted to nanoseconds
+ */
static inline u64 ktime_get_clocktai_ns(void)
{
return ktime_to_ns(ktime_get_clocktai());
}
+/**
+ * ktime_get_raw_ns - Get the raw monotonic time in nanoseconds
+ *
+ * Returns: current raw monotonic time converted to nanoseconds
+ */
static inline u64 ktime_get_raw_ns(void)
{
return ktime_to_ns(ktime_get_raw());
@@ -224,8 +258,8 @@ extern bool timekeeping_rtc_skipresume(void);
extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
-/*
- * struct ktime_timestanps - Simultaneous mono/boot/real timestamps
+/**
+ * struct ktime_timestamps - Simultaneous mono/boot/real timestamps
* @mono: Monotonic timestamp
* @boot: Boottime timestamp
* @real: Realtime timestamp
@@ -242,7 +276,8 @@ struct ktime_timestamps {
* @cycles: Clocksource counter value to produce the system times
* @real: Realtime system time
* @raw: Monotonic raw system time
- * @clock_was_set_seq: The sequence number of clock was set events
+ * @cs_id: Clocksource ID
+ * @clock_was_set_seq: The sequence number of clock-was-set events
* @cs_was_changed_seq: The sequence number of clocksource change events
*/
struct system_time_snapshot {
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 14a633ba61d6..e67ecd1cbc97 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -22,7 +22,7 @@
#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
#endif
-/**
+/*
* @TIMER_DEFERRABLE: A deferrable timer will work normally when the
* system is busy, but will not cause a CPU to come out of idle just
* to service it; instead, the timer will be serviced when the CPU
@@ -140,7 +140,7 @@ static inline void destroy_timer_on_stack(struct timer_list *timer) { }
* or not. Callers must ensure serialization wrt. other operations done
* to this timer, eg. interrupt contexts, or other CPUs on SMP.
*
- * return value: 1 if the timer is pending, 0 if not.
+ * Returns: 1 if the timer is pending, 0 if not.
*/
static inline int timer_pending(const struct timer_list * timer)
{
@@ -175,6 +175,10 @@ extern int timer_shutdown(struct timer_list *timer);
* See timer_delete_sync() for detailed explanation.
*
* Do not use in new code. Use timer_delete_sync() instead.
+ *
+ * Returns:
+ * * %0 - The timer was not pending
+ * * %1 - The timer was pending and deactivated
*/
static inline int del_timer_sync(struct timer_list *timer)
{
@@ -188,6 +192,10 @@ static inline int del_timer_sync(struct timer_list *timer)
* See timer_delete() for detailed explanation.
*
* Do not use in new code. Use timer_delete() instead.
+ *
+ * Returns:
+ * * %0 - The timer was not pending
+ * * %1 - The timer was pending and deactivated
*/
static inline int del_timer(struct timer_list *timer)
{
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index ffe48e69b3f3..457879938fc1 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -135,10 +135,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
p->v++;
}
-static inline void u64_stats_init(struct u64_stats_sync *syncp)
-{
- seqcount_init(&syncp->seq);
-}
+#define u64_stats_init(syncp) \
+ do { \
+ struct u64_stats_sync *__s = (syncp); \
+ seqcount_init(&__s->seq); \
+ } while (0)
static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
{
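
The macro form matters because seqcount_init() is itself a macro that instantiates a static lockdep key, so expanding it at each call site gives every syncp its own lockdep class instead of one class shared by all callers of the old inline function. A userspace model of that macro-vs-function difference:

#include <stdio.h>

/* A static inside a macro body is one object per expansion site,
 * much like the per-callsite lockdep keys. (Uses the GCC/Clang
 * statement-expression extension, as the kernel does.) */
#define per_site_counter() ({ static int __c; ++__c; })

static int site_a(void) { return per_site_counter(); }
static int site_b(void) { return per_site_counter(); }

int main(void)
{
        printf("%d %d %d\n", site_a(), site_a(), site_b());     /* 1 2 1 */
        return 0;
}
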
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 3748e82b627b..e398e1dbd2d3 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -108,7 +108,7 @@ struct udp_sock {
#define udp_assign_bit(nr, sk, val) \
assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
-#define UDP_MAX_SEGMENTS (1 << 6UL)
+#define UDP_MAX_SEGMENTS (1 << 7UL)
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
@@ -150,6 +150,24 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
}
}
+DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
+#if IS_ENABLED(CONFIG_IPV6)
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+#endif
+
+static inline bool udp_encap_needed(void)
+{
+ if (static_branch_unlikely(&udp_encap_needed_key))
+ return true;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (static_branch_unlikely(&udpv6_encap_needed_key))
+ return true;
+#endif
+
+ return false;
+}
+
static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
if (!skb_is_gso(skb))
@@ -163,6 +181,16 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
!udp_test_bit(ACCEPT_FRAGLIST, sk))
return true;
+ /* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
+ * land in a tunnel as the socket check in udp_gro_receive cannot be
+ * foolproof.
+ */
+ if (udp_encap_needed() &&
+ READ_ONCE(udp_sk(sk)->encap_rcv) &&
+ !(skb_shinfo(skb)->gso_type &
+ (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
+ return true;
+
return false;
}
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b0201747a263..26c4325aa373 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -170,7 +170,7 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev);
/**
* struct virtio_driver - operations for a virtio I/O driver
- * @driver: underlying device driver (populate name and owner).
+ * @driver: underlying device driver (populate name).
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this driver.
* @feature_table_size: number of entries in the feature table array.
@@ -208,7 +208,10 @@ static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
return container_of(drv, struct virtio_driver, driver);
}
-int register_virtio_driver(struct virtio_driver *drv);
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define register_virtio_driver(drv) \
+ __register_virtio_driver(drv, THIS_MODULE)
+int __register_virtio_driver(struct virtio_driver *drv, struct module *owner);
void unregister_virtio_driver(struct virtio_driver *drv);
/* module_virtio_driver() - Helper macro for drivers that don't do
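
This mirrors the pattern used by pci_register_driver() and friends; a hedged sketch of a driver built on the macro form (names hypothetical):

static struct virtio_driver my_driver = {
        .driver.name = "virtio-mydev",  /* .driver.owner no longer set by hand */
        /* .id_table, .probe, .remove, ... */
};

static int __init my_driver_init(void)
{
        /* Expands to __register_virtio_driver(&my_driver, THIS_MODULE),
         * so THIS_MODULE resolves in the driver's own translation unit. */
        return register_virtio_driver(&my_driver);
}
module_init(my_driver_init);
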
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 9d06eb945509..62a407db1bf5 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -438,6 +438,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
refcount_inc(&ifp->refcnt);
}
+static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
+{
+ return refcount_inc_not_zero(&ifp->refcnt);
+}
/*
* compute link-local solicited-node multicast address
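
A hedged sketch of the lookup pattern the new helper enables: take a reference under RCU only if the count has not already dropped to zero (ipv6_lookup_ifaddr() is a hypothetical stand-in for the actual hash lookup):

static struct inet6_ifaddr *my_find_ifaddr(struct net *net,
                                           const struct in6_addr *addr)
{
        struct inet6_ifaddr *ifp;

        rcu_read_lock();
        ifp = ipv6_lookup_ifaddr(net, addr);    /* hypothetical lookup */
        if (ifp && !in6_ifa_hold_safe(ifp))
                ifp = NULL;     /* raced with the final in6_ifa_put() */
        rcu_read_unlock();

        return ifp;
}
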
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 627ea8e2d915..3dee0b2721aa 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -85,6 +85,9 @@ enum unix_socket_lock_class {
U_LOCK_NORMAL,
U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+ U_LOCK_GC_LISTENER, /* used for the listening socket while determining
+ * gc candidates, to close a small race window.
+ */
};
static inline void unix_state_lock_nested(struct sock *sk,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 9fe95a22abeb..eaec5d6caa29 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -585,6 +585,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
return skb;
}
+static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
+ sockptr_t src, size_t src_size)
+{
+ if (dst_size > src_size)
+ return -EINVAL;
+
+ return copy_from_sockptr(dst, src, dst_size);
+}
+
int bt_to_errno(u16 code);
__u8 bt_status(int err);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 8701ca5f31ee..5c12761cbc0e 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -176,6 +176,15 @@ enum {
*/
HCI_QUIRK_USE_BDADDR_PROPERTY,
+ /* When this quirk is set, the Bluetooth Device Address provided by
+ * the 'local-bd-address' fwnode property is incorrectly specified in
+ * big-endian order.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BDADDR_PROPERTY_BROKEN,
+
/* When this quirk is set, the duplicate filtering during
* scanning is based on Bluetooth devices addresses. To allow
* RSSI based updates, restart scanning if needed.
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 56fb42df44a3..e8f581f3f3ce 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -738,6 +738,8 @@ struct hci_conn {
__u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
__u16 le_per_adv_data_len;
__u16 le_per_adv_data_offset;
+ __u8 le_adv_phy;
+ __u8 le_adv_sec_phy;
__u8 le_tx_phy;
__u8 le_rx_phy;
__s8 rssi;
@@ -1512,7 +1514,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
- u16 conn_timeout, u8 role);
+ u16 conn_timeout, u8 role, u8 phy, u8 sec_phy);
void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
@@ -1905,6 +1907,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
(hdev->commands[39] & 0x04))
+#define read_key_size_capable(dev) \
+ ((dev)->commands[20] & 0x10 && \
+ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &(dev)->quirks))
+
/* Use enhanced synchronous connection if command is supported and its quirk
* has not been set.
*/
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 2e2be4fd2bb6..1e09329acc42 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4991,6 +4991,7 @@ struct cfg80211_ops {
* set this flag to update channels on beacon hints.
* @WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY: support connection to non-primary link
* of an NSTR mobile AP MLD.
+ * @WIPHY_FLAG_DISABLE_WEXT: disable wireless extensions for this device
*/
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
@@ -5002,6 +5003,7 @@ enum wiphy_flags {
WIPHY_FLAG_4ADDR_STATION = BIT(6),
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
WIPHY_FLAG_IBSS_RSN = BIT(8),
+ WIPHY_FLAG_DISABLE_WEXT = BIT(9),
WIPHY_FLAG_MESH_AUTH = BIT(10),
WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = BIT(12),
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 9ab4bf704e86..ccf171f7eb60 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -175,6 +175,7 @@ void inet_csk_init_xmit_timers(struct sock *sk,
void (*delack_handler)(struct timer_list *),
void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);
+void inet_csk_clear_xmit_timers_sync(struct sock *sk);
static inline void inet_csk_schedule_ack(struct sock *sk)
{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 5cd64bb2104d..c286cc2e766e 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -361,6 +361,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
return pskb_network_may_pull(skb, nhlen);
}
+/* Variant of pskb_inet_may_pull() that also tolerates a VLAN header
+ * sitting in front of the IPv4/IPv6 header.
+ */
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+{
+ int nhlen = 0, maclen = ETH_HLEN;
+ __be16 type = skb->protocol;
+
+ /* Essentially this is skb_protocol(skb, true), and we learn the
+ * MAC header length along the way.
+ */
+ if (eth_type_vlan(type))
+ type = __vlan_get_protocol(skb, type, &maclen);
+
+ switch (type) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ nhlen = sizeof(struct ipv6hdr);
+ break;
+#endif
+ case htons(ETH_P_IP):
+ nhlen = sizeof(struct iphdr);
+ break;
+ }
+ /* For ETH_P_IPV6/ETH_P_IP we make sure to pull
+ * a base network header in skb->head.
+ */
+ if (!pskb_may_pull(skb, maclen + nhlen))
+ return false;
+
+ skb_set_network_header(skb, maclen);
+ return true;
+}
+
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
const struct ip_tunnel_encap_ops *ops;
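
A hedged usage sketch for the new helper in a tunnel transmit path (function names hypothetical):

static netdev_tx_t my_tunnel_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
{
        /* Pulls the MAC header plus the base IPv4/IPv6 header,
         * accounting for a VLAN tag if one is present. */
        if (!skb_vlan_inet_prepare(skb)) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* ip_hdr(skb) / ipv6_hdr(skb) are now safe to dereference */
        return my_tunnel_xmit_inner(skb, dev);  /* hypothetical */
}
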
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 353488ab94a2..2d7f87bc5324 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -953,6 +953,8 @@ enum mac80211_tx_info_flags {
* of their QoS TID or other priority field values.
* @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
* for sequence number assignment
+ * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted
+ * due to scanning, not in normal operation on the interface.
* @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
* frame should be transmitted on the specific link. This really is
* only relevant for frames that do not have data present, and is
@@ -973,6 +975,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
+ IEEE80211_TX_CTRL_SCAN_TX = BIT(10),
IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
};
diff --git a/include/net/macsec.h b/include/net/macsec.h
index dbd22180cc5c..de216cbc6b05 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -321,6 +321,7 @@ struct macsec_context {
* for the TX tag
* @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
* TX tag
+ * @rx_uses_md_dst: whether MACsec device offload supports sk_buff md_dst
*/
struct macsec_ops {
/* Device wide */
@@ -352,6 +353,7 @@ struct macsec_ops {
struct sk_buff *skb);
unsigned int needed_headroom;
unsigned int needed_tailroom;
+ bool rx_uses_md_dst;
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 76147feb0d10..4eeedf14711b 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -39,7 +39,6 @@ enum TRI_STATE {
#define COMP_ENTRY_SIZE 64
#define RX_BUFFERS_PER_QUEUE 512
-#define MANA_RX_DATA_ALIGN 64
#define MAX_SEND_BUFFERS_PER_QUEUE 256
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index a763dd327c6e..9abb7ee40d72 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -336,7 +336,7 @@ int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
-static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
{
__be16 proto;
@@ -352,6 +352,16 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
return 0;
}
+static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
+{
+ if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
+ return false;
+
+ *inner_proto = __nf_flow_pppoe_proto(skb);
+
+ return true;
+}
+
#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index e27c28b612e4..3f1ed467f951 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -307,9 +307,23 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
return (void *)priv;
}
+
+/**
+ * enum nft_iter_type - nftables set iterator type
+ *
+ * @NFT_ITER_READ: read-only iteration over set elements
+ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
+ */
+enum nft_iter_type {
+ NFT_ITER_UNSPEC,
+ NFT_ITER_READ,
+ NFT_ITER_UPDATE,
+};
+
struct nft_set;
struct nft_set_iter {
u8 genmask;
+ enum nft_iter_type type:8;
unsigned int count;
unsigned int skip;
int err;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index cefe0c4bdae3..41ca14e81d55 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -117,6 +117,7 @@ struct Qdisc {
struct qdisc_skb_head q;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
+ int owner;
unsigned long state;
unsigned long state2; /* must be written under qdisc spinlock */
struct Qdisc *next_sched;
diff --git a/include/net/sock.h b/include/net/sock.h
index b5e00702acc1..b4b553df7870 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1410,32 +1410,34 @@ sk_memory_allocated(const struct sock *sk)
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
extern int sysctl_mem_pcpu_rsv;
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+ int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+ if (val)
+ atomic_long_add(val, proto->memory_allocated);
+}
+
static inline void
-sk_memory_allocated_add(struct sock *sk, int amt)
+sk_memory_allocated_add(const struct sock *sk, int val)
{
- int local_reserve;
+ struct proto *proto = sk->sk_prot;
- preempt_disable();
- local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
- }
- preempt_enable();
+ val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
}
static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt)
+sk_memory_allocated_sub(const struct sock *sk, int val)
{
- int local_reserve;
+ struct proto *proto = sk->sk_prot;
- preempt_disable();
- local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
- }
- preempt_enable();
+ val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
}
#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
@@ -1759,6 +1761,13 @@ static inline void sock_owned_by_me(const struct sock *sk)
#endif
}
+static inline void sock_not_owned_by_me(const struct sock *sk)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks);
+#endif
+}
+
static inline bool sock_owned_by_user(const struct sock *sk)
{
sock_owned_by_me(sk);
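
A userspace model of the per-CPU batching above: deltas accumulate in a local counter and only spill into the shared atomic once they cross the reserve in either direction, which is what proto_memory_pcpu_drain() does for the protocol-wide total:

#include <stdio.h>

static long shared;                     /* proto->memory_allocated stand-in */
static int local;                       /* per-CPU counter stand-in */
static const int reserve = 256;         /* sysctl_mem_pcpu_rsv stand-in */

static void drain(void)
{
        shared += local;                /* one shared update per batch */
        local = 0;
}

static void account(int val)
{
        local += val;
        if (local >= reserve || local <= -reserve)
                drain();
}

int main(void)
{
        for (int i = 0; i < 1000; i++)
                account((i % 3) ? 32 : -32);
        drain();                        /* final flush, for the demo only */
        printf("shared=%ld\n", shared);
        return 0;
}
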
diff --git a/include/net/tls.h b/include/net/tls.h
index 340ad43971e4..33f657d3c051 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -111,7 +111,8 @@ struct tls_strparser {
u32 stopped : 1;
u32 copy_mode : 1;
u32 mixed_decrypted : 1;
- u32 msg_ready : 1;
+
+ bool msg_ready;
struct strp_msg stm;
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 3cb4dc9bd70e..3d54de168a6d 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -188,6 +188,8 @@ static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
{
if (!compl)
return;
+ if (!compl->tx_timestamp)
+ return;
*compl->tx_timestamp = ops->tmo_fill_timestamp(priv);
}
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 4ce1988b2ba0..f40915d2ecee 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -12,6 +12,7 @@ struct request;
struct scsi_driver {
struct device_driver gendrv;
+ int (*resume)(struct device *);
void (*rescan)(struct device *);
blk_status_t (*init_command)(struct scsi_cmnd *);
void (*uninit_command)(struct scsi_cmnd *);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index b259d42a1e1a..129001f600fc 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -767,6 +767,7 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
+extern int scsi_resume_device(struct scsi_device *sdev);
extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index a8bebac1e4b2..957295364a5e 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -56,6 +56,9 @@ struct hdac_ext_stream {
u32 pphcldpl;
u32 pphcldpu;
+ u32 pplcllpl;
+ u32 pplcllpu;
+
bool decoupled:1;
bool link_locked:1;
bool link_prepared;
diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
index 53470d6a28d6..24dbe16684ae 100644
--- a/include/sound/intel-nhlt.h
+++ b/include/sound/intel-nhlt.h
@@ -143,6 +143,9 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
u32 bus_id, u8 link_type, u8 vbps, u8 bps,
u8 num_ch, u32 rate, u8 dir, u8 dev_type);
+int intel_nhlt_ssp_device_type(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u8 virtual_bus_id);
+
#else
static inline struct nhlt_acpi_table *intel_nhlt_init(struct device *dev)
@@ -184,6 +187,13 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
return NULL;
}
+static inline int intel_nhlt_ssp_device_type(struct device *dev,
+ struct nhlt_acpi_table *nhlt,
+ u8 virtual_bus_id)
+{
+ return -EINVAL;
+}
+
#endif
#endif
diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
index 4038dd421150..1dc59005d241 100644
--- a/include/sound/tas2781-tlv.h
+++ b/include/sound/tas2781-tlv.h
@@ -15,7 +15,7 @@
#ifndef __TAS2781_TLV_H__
#define __TAS2781_TLV_H__
-static const DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
static const DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
#endif
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index d801409b33cf..d55e53ac91bd 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -135,6 +135,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
#define __def_pagetype_names \
+ DEF_PAGETYPE_NAME(hugetlb), \
DEF_PAGETYPE_NAME(offline), \
DEF_PAGETYPE_NAME(guard), \
DEF_PAGETYPE_NAME(table), \
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index ba2d96a1bc2f..f50fcafc69de 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -609,7 +609,7 @@ TRACE_EVENT(rpcgss_context,
__field(unsigned int, timeout)
__field(u32, window_size)
__field(int, len)
- __string(acceptor, data)
+ __string_len(acceptor, data, len)
),
TP_fast_assign(
@@ -618,7 +618,7 @@ TRACE_EVENT(rpcgss_context,
__entry->timeout = timeout;
__entry->window_size = window_size;
__entry->len = len;
- strncpy(__get_str(acceptor), data, len);
+ __assign_str(acceptor, data);
),
TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 7040e7ea80c7..1ca5c7e418fd 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -866,6 +866,17 @@ struct drm_color_lut {
};
/**
+ * struct drm_plane_size_hint - Plane size hints
+ *
+ * The plane SIZE_HINTS property blob contains an
+ * array of struct drm_plane_size_hint.
+ */
+struct drm_plane_size_hint {
+ __u16 width;
+ __u16 height;
+};
+
+/**
* struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
*
* HDR Metadata Infoframe as per CTA 861.G spec. This is expected
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index d87410a8443a..af024d90453d 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -77,11 +77,6 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
-#define ETNAVIV_PARAM_GPU_NN_CORE_COUNT 0x1f
-#define ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE 0x20
-#define ETNAVIV_PARAM_GPU_TP_CORE_COUNT 0x21
-#define ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE 0x22
-#define ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE 0x23
#define ETNA_MAX_PIPES 4
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 2ee338860b7e..d4d86e566e07 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -806,6 +806,12 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_PXP_STATUS 58
+/*
+ * Query if kernel allows marking a context to send a Freq hint to SLPC. This
+ * will enable use of the strategies allowed by the SLPC algorithm.
+ */
+#define I915_PARAM_HAS_CONTEXT_FREQ_HINT 59
+
/* Must be kept compact -- no holes and well documented */
/**
@@ -2148,6 +2154,15 @@ struct drm_i915_gem_context_param {
* -EIO: The firmware did not succeed in creating the protected context.
*/
#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
+
+/*
+ * I915_CONTEXT_PARAM_LOW_LATENCY:
+ *
+ * Mark this context as a low latency workload which requires aggressive GT
+ * frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check if the kernel
+ * supports this per-context flag.
+ */
+#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe
/* Must be kept compact -- no holes and well documented */
/** @value: Context parameter value to be set or queried */
@@ -2623,19 +2638,29 @@ struct drm_i915_reg_read {
*
*/
+/*
+ * struct drm_i915_reset_stats - Return global reset and other context stats
+ *
+ * The driver keeps a few stats for each context and also a global reset count.
+ * This struct can be used to query those stats.
+ */
struct drm_i915_reset_stats {
+ /** @ctx_id: ID of the requested context */
__u32 ctx_id;
+
+ /** @flags: MBZ */
__u32 flags;
- /* All resets since boot/module reload, for all contexts */
+ /** @reset_count: All resets since boot/module reload, for all contexts */
__u32 reset_count;
- /* Number of batches lost when active in GPU, for this context */
+ /** @batch_active: Number of batches lost when active in GPU, for this context */
__u32 batch_active;
- /* Number of batches lost pending for execution, for this context */
+ /** @batch_pending: Number of batches lost pending for execution, for this context */
__u32 batch_pending;
+ /** @pad: MBZ */
__u32 pad;
};
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index cd84227f1b42..8ad8d1cd1566 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -73,6 +73,16 @@ struct drm_nouveau_getparam {
__u64 value;
};
+/*
+ * These are used to select the main engine used on Kepler.
+ * This goes into drm_nouveau_channel_alloc::tt_ctxdma_handle
+ */
+#define NOUVEAU_FIFO_ENGINE_GR 0x01
+#define NOUVEAU_FIFO_ENGINE_VP 0x02
+#define NOUVEAU_FIFO_ENGINE_PPP 0x04
+#define NOUVEAU_FIFO_ENGINE_BSP 0x08
+#define NOUVEAU_FIFO_ENGINE_CE 0x30
+
struct drm_nouveau_channel_alloc {
__u32 fb_ctxdma_handle;
__u32 tt_ctxdma_handle;
@@ -95,6 +105,18 @@ struct drm_nouveau_channel_free {
__s32 channel;
};
+struct drm_nouveau_notifierobj_alloc {
+ __u32 channel;
+ __u32 handle;
+ __u32 size;
+ __u32 offset;
+};
+
+struct drm_nouveau_gpuobj_free {
+ __s32 channel;
+ __u32 handle;
+};
+
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
new file mode 100644
index 000000000000..dadb05ab1235
--- /dev/null
+++ b/include/uapi/drm/panthor_drm.h
@@ -0,0 +1,945 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2023 Collabora ltd. */
+#ifndef _PANTHOR_DRM_H_
+#define _PANTHOR_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: Introduction
+ *
+ * This documentation describes the Panthor IOCTLs.
+ *
+ * Just a few generic rules about the data passed to the Panthor IOCTLs:
+ *
+ * - Structures must be aligned on 64-bit/8-byte. If the object is not
+ * naturally aligned, a padding field must be added.
+ * - Fields must be explicitly aligned to their natural type alignment with
+ * pad[0..N] fields.
+ * - All padding fields will be checked by the driver to make sure they are
+ * zeroed.
+ * - Flags can be added, but not removed/replaced.
+ * - New fields can be added to the main structures (the structures
+ * directly passed to the ioctl). Those fields can be added at the end of
+ * the structure, or replace existing padding fields. Any new field being
+ * added must preserve the behavior that existed before those fields were
+ * added when a value of zero is passed.
+ * - New fields can be added to indirect objects (objects pointed by the
+ * main structure), iff those objects are passed a size to reflect the
+ * size known by the userspace driver (see drm_panthor_obj_array::stride
+ * or drm_panthor_dev_query::size).
+ * - If the kernel driver is too old to know some fields, those will be
+ * ignored if zero, and otherwise rejected (and so will be zero on output).
+ * - If userspace is too old to know some fields, those will be zeroed
+ * (input) before the structure is parsed by the kernel driver.
+ * - Each new flag/field addition must come with a driver version update so
+ * the userspace driver doesn't have to resort to trial and error to know which
+ * flags are supported.
+ * - Structures should not contain unions, as this would defeat the
+ * extensibility of such structures.
+ * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
+ * at the end of the drm_panthor_ioctl_id enum.
+ */
+
+/**
+ * DOC: MMIO regions exposed to userspace.
+ *
+ * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
+ *
+ * File offset for all MMIO regions being exposed to userspace. Don't use
+ * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
+ * pgoffset passed to mmap2() is an unsigned long, which forces us to use a
+ * different offset on 32-bit and 64-bit systems.
+ *
+ * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
+ *
+ * File offset for the LATEST_FLUSH_ID register. The userspace driver controls
+ * GPU cache flushing through CS instructions, but the flush reduction
+ * mechanism requires a flush_id. This flush_id could be queried with an
+ * ioctl, but Arm provides a well-isolated register page containing only this
+ * read-only register, so let's expose this page through a static mmap offset
+ * and allow direct mapping of this MMIO region so we can avoid the
+ * user <-> kernel round-trip.
+ */
+#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT (1ull << 43)
+#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT (1ull << 56)
+#define DRM_PANTHOR_USER_MMIO_OFFSET (sizeof(unsigned long) < 8 ? \
+ DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
+ DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
+#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET (DRM_PANTHOR_USER_MMIO_OFFSET | 0)
+
+/**
+ * DOC: IOCTL IDs
+ *
+ * enum drm_panthor_ioctl_id - IOCTL IDs
+ *
+ * Place new ioctls at the end, don't re-order, don't replace or remove entries.
+ *
+ * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
+ * definitions instead.
+ */
+enum drm_panthor_ioctl_id {
+ /** @DRM_PANTHOR_DEV_QUERY: Query device information. */
+ DRM_PANTHOR_DEV_QUERY = 0,
+
+ /** @DRM_PANTHOR_VM_CREATE: Create a VM. */
+ DRM_PANTHOR_VM_CREATE,
+
+ /** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
+ DRM_PANTHOR_VM_DESTROY,
+
+ /** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
+ DRM_PANTHOR_VM_BIND,
+
+ /** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
+ DRM_PANTHOR_VM_GET_STATE,
+
+ /** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
+ DRM_PANTHOR_BO_CREATE,
+
+ /**
+ * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
+ * mmap to map a GEM object.
+ */
+ DRM_PANTHOR_BO_MMAP_OFFSET,
+
+ /** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
+ DRM_PANTHOR_GROUP_CREATE,
+
+ /** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
+ DRM_PANTHOR_GROUP_DESTROY,
+
+ /**
+ * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
+ * to a specific scheduling group.
+ */
+ DRM_PANTHOR_GROUP_SUBMIT,
+
+ /** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
+ DRM_PANTHOR_GROUP_GET_STATE,
+
+ /** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
+ DRM_PANTHOR_TILER_HEAP_CREATE,
+
+ /** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
+ DRM_PANTHOR_TILER_HEAP_DESTROY,
+};
+
+/**
+ * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
+ * @__access: Access type. Must be R, W or RW.
+ * @__id: One of the DRM_PANTHOR_xxx IDs.
+ * @__type: Suffix of the type being passed to the IOCTL.
+ *
+ * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
+ * values instead.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
+ DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
+ struct drm_panthor_ ## __type)
+
+#define DRM_IOCTL_PANTHOR_DEV_QUERY \
+ DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query)
+#define DRM_IOCTL_PANTHOR_VM_CREATE \
+ DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create)
+#define DRM_IOCTL_PANTHOR_VM_DESTROY \
+ DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy)
+#define DRM_IOCTL_PANTHOR_VM_BIND \
+ DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind)
+#define DRM_IOCTL_PANTHOR_VM_GET_STATE \
+ DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state)
+#define DRM_IOCTL_PANTHOR_BO_CREATE \
+ DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create)
+#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET \
+ DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset)
+#define DRM_IOCTL_PANTHOR_GROUP_CREATE \
+ DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create)
+#define DRM_IOCTL_PANTHOR_GROUP_DESTROY \
+ DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy)
+#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT \
+ DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit)
+#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE \
+ DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state)
+#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE \
+ DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create)
+#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY \
+ DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy)
+
+/**
+ * DOC: IOCTL arguments
+ */
+
+/**
+ * struct drm_panthor_obj_array - Object array.
+ *
+ * This object is used to pass an array of objects whose size is subject to change in
+ * future versions of the driver. In order to support this mutability, we pass a stride
+ * describing the size of the object as known by userspace.
+ *
+ * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
+ * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
+ * the object size.
+ */
+struct drm_panthor_obj_array {
+ /** @stride: Stride of object struct. Used for versioning. */
+ __u32 stride;
+
+ /** @count: Number of objects in the array. */
+ __u32 count;
+
+ /** @array: User pointer to an array of objects. */
+ __u64 array;
+};
+
+/**
+ * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
+ * @cnt: Number of elements in the array.
+ * @ptr: Pointer to the array to pass to the kernel.
+ *
+ * Macro initializing a drm_panthor_obj_array based on the object size as known
+ * by userspace.
+ */
+#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
+ { .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
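For illustration, wrapping a caller-owned array looks like this (the syncs array is a hypothetical example; the macro derives the stride from the element type):

    struct drm_panthor_sync_op syncs[2];
    /* ... fill syncs[0] and syncs[1] ... */
    struct drm_panthor_obj_array arr = DRM_PANTHOR_OBJ_ARRAY(2, syncs);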
+
+/**
+ * enum drm_panthor_sync_op_flags - Synchronization operation flags.
+ */
+enum drm_panthor_sync_op_flags {
+ /** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
+ DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,
+
+ /** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
+ DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,
+
+ /**
+ * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
+ * object type.
+ */
+ DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,
+
+ /** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
+ DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,
+
+ /** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
+ DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
+};
+
+/**
+ * struct drm_panthor_sync_op - Synchronization operation.
+ */
+struct drm_panthor_sync_op {
+ /** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
+ __u32 flags;
+
+ /** @handle: Sync handle. */
+ __u32 handle;
+
+ /**
+ * @timeline_value: MBZ if
+ * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
+ * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+};
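A hedged sketch of a single sync operation, signalling point 42 on a timeline syncobj (syncobj_handle is assumed to come from DRM_IOCTL_SYNCOBJ_CREATE):

    struct drm_panthor_sync_op sync = {
            .flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
                     DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ,
            .handle = syncobj_handle,
            .timeline_value = 42, /* timeline point to signal */
    };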
+
+/**
+ * enum drm_panthor_dev_query_type - Query type
+ *
+ * Place new types at the end, don't re-order, don't remove or replace.
+ */
+enum drm_panthor_dev_query_type {
+ /** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
+ DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,
+
+ /** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
+ DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
+};
+
+/**
+ * struct drm_panthor_gpu_info - GPU information
+ *
+ * Structure grouping all queryable information relating to the GPU.
+ */
+struct drm_panthor_gpu_info {
+ /** @gpu_id: GPU ID. */
+ __u32 gpu_id;
+#define DRM_PANTHOR_ARCH_MAJOR(x) ((x) >> 28)
+#define DRM_PANTHOR_ARCH_MINOR(x) (((x) >> 24) & 0xf)
+#define DRM_PANTHOR_ARCH_REV(x) (((x) >> 20) & 0xf)
+#define DRM_PANTHOR_PRODUCT_MAJOR(x) (((x) >> 16) & 0xf)
+#define DRM_PANTHOR_VERSION_MAJOR(x) (((x) >> 12) & 0xf)
+#define DRM_PANTHOR_VERSION_MINOR(x) (((x) >> 4) & 0xff)
+#define DRM_PANTHOR_VERSION_STATUS(x) ((x) & 0xf)
+
+ /** @gpu_rev: GPU revision. */
+ __u32 gpu_rev;
+
+ /** @csf_id: Command stream frontend ID. */
+ __u32 csf_id;
+#define DRM_PANTHOR_CSHW_MAJOR(x) (((x) >> 26) & 0x3f)
+#define DRM_PANTHOR_CSHW_MINOR(x) (((x) >> 20) & 0x3f)
+#define DRM_PANTHOR_CSHW_REV(x) (((x) >> 16) & 0xf)
+#define DRM_PANTHOR_MCU_MAJOR(x) (((x) >> 10) & 0x3f)
+#define DRM_PANTHOR_MCU_MINOR(x) (((x) >> 4) & 0x3f)
+#define DRM_PANTHOR_MCU_REV(x) ((x) & 0xf)
+
+ /** @l2_features: L2-cache features. */
+ __u32 l2_features;
+
+ /** @tiler_features: Tiler features. */
+ __u32 tiler_features;
+
+ /** @mem_features: Memory features. */
+ __u32 mem_features;
+
+ /** @mmu_features: MMU features. */
+ __u32 mmu_features;
+#define DRM_PANTHOR_MMU_VA_BITS(x) ((x) & 0xff)
+
+ /** @thread_features: Thread features. */
+ __u32 thread_features;
+
+ /** @max_threads: Maximum number of threads. */
+ __u32 max_threads;
+
+ /** @thread_max_workgroup_size: Maximum workgroup size. */
+ __u32 thread_max_workgroup_size;
+
+ /**
+ * @thread_max_barrier_size: Maximum number of threads that can wait
+ * simultaneously on a barrier.
+ */
+ __u32 thread_max_barrier_size;
+
+ /** @coherency_features: Coherency features. */
+ __u32 coherency_features;
+
+ /** @texture_features: Texture features. */
+ __u32 texture_features[4];
+
+ /** @as_present: Bitmask encoding the address spaces exposed by the MMU. */
+ __u32 as_present;
+
+ /** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
+ __u64 shader_present;
+
+ /** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
+ __u64 l2_present;
+
+ /** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
+ __u64 tiler_present;
+
+ /** @core_features: Used to discriminate core variants when they exist. */
+ __u32 core_features;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+};
+
+/**
+ * struct drm_panthor_csif_info - Command stream interface information
+ *
+ * Structure grouping all queryable information relating to the command stream interface.
+ */
+struct drm_panthor_csif_info {
+ /** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
+ __u32 csg_slot_count;
+
+ /** @cs_slot_count: Number of command stream slots per group. */
+ __u32 cs_slot_count;
+
+ /** @cs_reg_count: Number of command stream registers. */
+ __u32 cs_reg_count;
+
+ /** @scoreboard_slot_count: Number of scoreboard slots. */
+ __u32 scoreboard_slot_count;
+
+ /**
+ * @unpreserved_cs_reg_count: Number of command stream registers reserved by
+ * the kernel driver to call a userspace command stream.
+ *
+ * All registers can be used by a userspace command stream, but the
+ * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
+ * used by the kernel when DRM_IOCTL_PANTHOR_GROUP_SUBMIT is called.
+ */
+ __u32 unpreserved_cs_reg_count;
+
+ /**
+ * @pad: Padding field, set to zero.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_panthor_dev_query - Arguments passed to DRM_IOCTL_PANTHOR_DEV_QUERY
+ */
+struct drm_panthor_dev_query {
+ /** @type: the query type (see drm_panthor_dev_query_type). */
+ __u32 type;
+
+ /**
+ * @size: size of the type being queried.
+ *
+ * If pointer is NULL, size is updated by the driver to provide the
+ * output structure size. If pointer is not NULL, the driver will
+ * only copy min(size, actual_structure_size) bytes to the pointer,
+ * and update the size accordingly. This allows us to extend query
+ * types without breaking userspace.
+ */
+ __u32 size;
+
+ /**
+ * @pointer: user pointer to a query type struct.
+ *
+ * Pointer can be NULL, in which case nothing is copied, but the
+ * actual structure size is returned. If not NULL, it must point to
+ * a location that's large enough to hold size bytes.
+ */
+ __u64 pointer;
+};
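A minimal sketch of the query call, assuming fd is an open Panthor node and drmIoctl() is libdrm's ioctl wrapper (includes and error handling elided). Passing a sized, pre-zeroed struct gives the min(size, actual) copy semantics described above:

    struct drm_panthor_gpu_info info = {0};
    struct drm_panthor_dev_query q = {
            .type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
            .size = sizeof(info),
            .pointer = (__u64)(uintptr_t)&info,
    };

    if (drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q) == 0)
            printf("arch %u.%u, %u VA bits\n",
                   DRM_PANTHOR_ARCH_MAJOR(info.gpu_id),
                   DRM_PANTHOR_ARCH_MINOR(info.gpu_id),
                   DRM_PANTHOR_MMU_VA_BITS(info.mmu_features));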
+
+/**
+ * struct drm_panthor_vm_create - Arguments passed to DRM_IOCTL_PANTHOR_VM_CREATE
+ */
+struct drm_panthor_vm_create {
+ /** @flags: VM flags, MBZ. */
+ __u32 flags;
+
+ /** @id: Returned VM ID. */
+ __u32 id;
+
+ /**
+ * @user_va_range: Size of the VA space reserved for user objects.
+ *
+ * The kernel will pick the remaining space to map kernel-only objects to the
+ * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
+ * ...). If the space left for kernel objects is too small, kernel object
+ * allocation will fail further down the road. One can use
+ * drm_panthor_gpu_info::mmu_features to extract the total virtual address
+ * range, and choose a user_va_range that leaves some space for the kernel.
+ *
+ * If user_va_range is zero, the kernel will pick a sensible value based on
+ * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
+ * split should leave enough VA space for userspace processes to support SVM,
+ * while still allowing the kernel to map some amount of kernel objects in
+ * the kernel VA range). The value chosen by the driver will be returned in
+ * @user_va_range.
+ *
+ * User VA space always starts at 0x0, kernel VA space is always placed after
+ * the user VA range.
+ */
+ __u64 user_va_range;
+};
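A hedged sketch of the simplest use: pass zero and let the kernel pick the split (same assumptions as the earlier drmIoctl() sketches):

    struct drm_panthor_vm_create vm = { .user_va_range = 0 };

    if (drmIoctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm) == 0)
            printf("VM %u, user VA range 0x%llx\n", vm.id,
                   (unsigned long long)vm.user_va_range);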
+
+/**
+ * struct drm_panthor_vm_destroy - Arguments passed to DRM_IOCTL_PANTHOR_VM_DESTROY
+ */
+struct drm_panthor_vm_destroy {
+ /** @id: ID of the VM to destroy. */
+ __u32 id;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+};
+
+/**
+ * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
+ */
+enum drm_panthor_vm_bind_op_flags {
+ /**
+ * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
+ *
+ * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
+ */
+ DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,
+
+ /**
+ * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
+ *
+ * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
+ */
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,
+
+ /**
+ * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
+ *
+ * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
+ */
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,
+
+ /**
+ * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
+ */
+ DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),
+
+ /** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
+ DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,
+
+ /** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
+ DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,
+
+ /**
+ * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
+ *
+ * Just serves as a synchronization point on a VM queue.
+ *
+ * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
+ * and drm_panthor_vm_bind_op::syncs contains at least one element.
+ */
+ DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
+};
+
+/**
+ * struct drm_panthor_vm_bind_op - VM bind operation
+ */
+struct drm_panthor_vm_bind_op {
+ /** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
+ __u32 flags;
+
+ /**
+ * @bo_handle: Handle of the buffer object to map.
+ * MBZ for unmap or sync-only operations.
+ */
+ __u32 bo_handle;
+
+ /**
+ * @bo_offset: Buffer object offset.
+ * MBZ for unmap or sync-only operations.
+ */
+ __u64 bo_offset;
+
+ /**
+ * @va: Virtual address to map/unmap.
+ * MBZ for sync-only operations.
+ */
+ __u64 va;
+
+ /**
+ * @size: Size to map/unmap.
+ * MBZ for sync-only operations.
+ */
+ __u64 size;
+
+ /**
+ * @syncs: Array of struct drm_panthor_sync_op synchronization
+ * operations.
+ *
+ * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
+ * the drm_panthor_vm_bind object containing this VM bind operation.
+ *
+ * This array shall not be empty for sync-only operations.
+ */
+ struct drm_panthor_obj_array syncs;
+};
+
+/**
+ * enum drm_panthor_vm_bind_flags - VM bind flags
+ */
+enum drm_panthor_vm_bind_flags {
+ /**
+ * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
+ * queue instead of being executed synchronously.
+ */
+ DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
+};
+
+/**
+ * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
+ */
+struct drm_panthor_vm_bind {
+ /** @vm_id: VM targeted by the bind request. */
+ __u32 vm_id;
+
+ /** @flags: Combination of drm_panthor_vm_bind_flags flags. */
+ __u32 flags;
+
+ /** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
+ struct drm_panthor_obj_array ops;
+};
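A hedged sketch of a synchronous map; vm_id, bo_handle, gpu_va and bo_size are assumed to come from earlier ioctls:

    struct drm_panthor_vm_bind_op op = {
            .flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
            .bo_handle = bo_handle,
            .bo_offset = 0,
            .va = gpu_va,   /* must fall inside the user VA range */
            .size = bo_size,
            /* .syncs left empty: mandatory when ASYNC is not set */
    };
    struct drm_panthor_vm_bind bind = {
            .vm_id = vm_id,
            .flags = 0,     /* no DRM_PANTHOR_VM_BIND_ASYNC */
            .ops = DRM_PANTHOR_OBJ_ARRAY(1, &op),
    };

    drmIoctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind);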
+
+/**
+ * enum drm_panthor_vm_state - VM states.
+ */
+enum drm_panthor_vm_state {
+ /**
+ * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
+ *
+ * New VM operations will be accepted on this VM.
+ */
+ DRM_PANTHOR_VM_STATE_USABLE,
+
+ /**
+ * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
+ *
+ * Something put the VM in an unusable state (like an asynchronous
+ * VM_BIND request failing for any reason).
+ *
+ * Once the VM is in this state, all new MAP operations will be
+ * rejected, and any GPU job targeting this VM will fail.
+ * UNMAP operations are still accepted.
+ *
+ * The only way to recover from an unusable VM is to create a new
+ * VM, and destroy the old one.
+ */
+ DRM_PANTHOR_VM_STATE_UNUSABLE,
+};
+
+/**
+ * struct drm_panthor_vm_get_state - Get VM state.
+ */
+struct drm_panthor_vm_get_state {
+ /** @vm_id: VM targeted by the get_state request. */
+ __u32 vm_id;
+
+ /**
+ * @state: state returned by the driver.
+ *
+ * Must be one of the enum drm_panthor_vm_state values.
+ */
+ __u32 state;
+};
+
+/**
+ * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
+ */
+enum drm_panthor_bo_flags {
+ /** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
+ DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
+};
+
+/**
+ * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
+ */
+struct drm_panthor_bo_create {
+ /**
+ * @size: Requested size for the object
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ __u64 size;
+
+ /**
+ * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
+ */
+ __u32 flags;
+
+ /**
+ * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
+ *
+ * If not zero, the field must refer to a valid VM ID, and implies that:
+ * - the buffer object will only ever be bound to that VM
+ * - the buffer object cannot be exported as a PRIME fd
+ */
+ __u32 exclusive_vm_id;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+};
+
+/**
+ * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
+ */
+struct drm_panthor_bo_mmap_offset {
+ /** @handle: Handle of the object we want an mmap offset for. */
+ __u32 handle;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+
+ /** @offset: The fake offset to use for subsequent mmap calls. */
+ __u64 offset;
+};
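A hedged sketch chaining the two BO ioctls with mmap (same assumptions as the earlier sketches):

    struct drm_panthor_bo_create create = { .size = 4096 };
    struct drm_panthor_bo_mmap_offset moff = {0};
    void *cpu = MAP_FAILED;

    if (!drmIoctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &create)) {
            moff.handle = create.handle;
            if (!drmIoctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &moff))
                    cpu = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, moff.offset);
    }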
+
+/**
+ * struct drm_panthor_queue_create - Queue creation arguments.
+ */
+struct drm_panthor_queue_create {
+ /**
+ * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
+ * 15 being the highest priority.
+ */
+ __u8 priority;
+
+ /** @pad: Padding fields, MBZ. */
+ __u8 pad[3];
+
+ /** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
+ __u32 ringbuf_size;
+};
+
+/**
+ * enum drm_panthor_group_priority - Scheduling group priority
+ */
+enum drm_panthor_group_priority {
+ /** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
+ PANTHOR_GROUP_PRIORITY_LOW = 0,
+
+ /** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
+ PANTHOR_GROUP_PRIORITY_MEDIUM,
+
+ /** @PANTHOR_GROUP_PRIORITY_HIGH: High priority group. */
+ PANTHOR_GROUP_PRIORITY_HIGH,
+};
+
+/**
+ * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
+ */
+struct drm_panthor_group_create {
+ /** @queues: Array of drm_panthor_queue_create elements. */
+ struct drm_panthor_obj_array queues;
+
+ /**
+ * @max_compute_cores: Maximum number of cores that can be used by compute
+ * jobs across CS queues bound to this group.
+ *
+ * Must be less than or equal to the number of bits set in @compute_core_mask.
+ */
+ __u8 max_compute_cores;
+
+ /**
+ * @max_fragment_cores: Maximum number of cores that can be used by fragment
+ * jobs across CS queues bound to this group.
+ *
+ * Must be less than or equal to the number of bits set in @fragment_core_mask.
+ */
+ __u8 max_fragment_cores;
+
+ /**
+ * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
+ * across CS queues bound to this group.
+ *
+ * Must be less than or equal to the number of bits set in @tiler_core_mask.
+ */
+ __u8 max_tiler_cores;
+
+ /** @priority: Group priority (see enum drm_panthor_group_priority). */
+ __u8 priority;
+
+ /** @pad: Padding field, MBZ. */
+ __u32 pad;
+
+ /**
+ * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
+ *
+ * This field must have at least @max_compute_cores bits set.
+ *
+ * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
+ */
+ __u64 compute_core_mask;
+
+ /**
+ * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
+ *
+ * This field must have at least @max_fragment_cores bits set.
+ *
+ * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
+ */
+ __u64 fragment_core_mask;
+
+ /**
+ * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
+ *
+ * This field must have at least @max_tiler_cores bits set.
+ *
+ * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
+ */
+ __u64 tiler_core_mask;
+
+ /**
+ * @vm_id: VM ID to bind this group to.
+ *
+ * All submission to queues bound to this group will use this VM.
+ */
+ __u32 vm_id;
+
+ /**
+ * @group_handle: Returned group handle. Passed back when submitting jobs or
+ * destroying a group.
+ */
+ __u32 group_handle;
+};
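A hedged sketch of a one-queue group that opens up every core reported by DEV_QUERY; info is the struct drm_panthor_gpu_info fetched earlier, vm_id an existing VM:

    struct drm_panthor_queue_create qc = {
            .priority = 1,
            .ringbuf_size = 64 * 1024,
    };
    struct drm_panthor_group_create gc = {
            .queues = DRM_PANTHOR_OBJ_ARRAY(1, &qc),
            .max_compute_cores = __builtin_popcountll(info.shader_present),
            .max_fragment_cores = __builtin_popcountll(info.shader_present),
            .max_tiler_cores = 1,
            .priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
            .compute_core_mask = info.shader_present,
            .fragment_core_mask = info.shader_present,
            .tiler_core_mask = info.tiler_present,
            .vm_id = vm_id,
    };

    drmIoctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &gc);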
+
+/**
+ * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
+ */
+struct drm_panthor_group_destroy {
+ /** @group_handle: Group to destroy */
+ __u32 group_handle;
+
+ /** @pad: Padding field, MBZ. */
+ __u32 pad;
+};
+
+/**
+ * struct drm_panthor_queue_submit - Job submission arguments.
+ *
+ * This describes the userspace command stream to call from the kernel
+ * command stream ring buffer. Queue submission is always part of a group
+ * submission, taking one or more jobs to submit to the underlying queues.
+ */
+struct drm_panthor_queue_submit {
+ /** @queue_index: Index of the queue inside a group. */
+ __u32 queue_index;
+
+ /**
+ * @stream_size: Size of the command stream to execute.
+ *
+ * Must be 64-bit/8-byte aligned (the size of a CS instruction).
+ *
+ * Can be zero if stream_addr is zero too.
+ */
+ __u32 stream_size;
+
+ /**
+ * @stream_addr: GPU address of the command stream to execute.
+ *
+ * Must be 64-byte aligned.
+ *
+ * Can be zero if stream_size is zero too.
+ */
+ __u64 stream_addr;
+
+ /**
+ * @latest_flush: FLUSH_ID read at the time the stream was built.
+ *
+ * This allows cache flush elimination for the automatic
+ * flush+invalidate(all) done at submission time, which is needed to
+ * ensure the GPU doesn't get garbage when reading the indirect command
+ * stream buffers. If you want the cache flush to happen
+ * unconditionally, pass a zero here.
+ */
+ __u32 latest_flush;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+
+ /** @syncs: Array of struct drm_panthor_sync_op sync operations. */
+ struct drm_panthor_obj_array syncs;
+};
+
+/**
+ * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
+ */
+struct drm_panthor_group_submit {
+ /** @group_handle: Handle of the group to queue jobs to. */
+ __u32 group_handle;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+
+ /** @queue_submits: Array of drm_panthor_queue_submit objects. */
+ struct drm_panthor_obj_array queue_submits;
+};
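A hedged sketch of a single-queue submission, reusing the read_latest_flush_id() helper sketched earlier to elide redundant cache flushes; stream_gpu_va, stream_size and group_handle are assumed from prior setup:

    struct drm_panthor_queue_submit qs = {
            .queue_index = 0,
            .stream_size = stream_size,     /* multiple of 8 */
            .stream_addr = stream_gpu_va,   /* 64-byte aligned */
            .latest_flush = read_latest_flush_id(fd),
    };
    struct drm_panthor_group_submit gs = {
            .group_handle = group_handle,
            .queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qs),
    };

    drmIoctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &gs);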
+
+/**
+ * enum drm_panthor_group_state_flags - Group state flags
+ */
+enum drm_panthor_group_state_flags {
+ /**
+ * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
+ *
+ * When a group ends up with this flag set, no jobs can be submitted to its queues.
+ */
+ DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,
+
+ /**
+ * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
+ *
+ * When a group ends up with this flag set, no jobs can be submitted to its queues.
+ */
+ DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,
+};
+
+/**
+ * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
+ *
+ * Used to query the state of a group and decide whether a new group should be created to
+ * replace it.
+ */
+struct drm_panthor_group_get_state {
+ /** @group_handle: Handle of the group to query state on */
+ __u32 group_handle;
+
+ /**
+ * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
+ * group state.
+ */
+ __u32 state;
+
+ /** @fatal_queues: Bitmask of queues that faced fatal faults. */
+ __u32 fatal_queues;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
+ */
+struct drm_panthor_tiler_heap_create {
+ /** @vm_id: VM ID the tiler heap should be mapped to */
+ __u32 vm_id;
+
+ /** @initial_chunk_count: Initial number of chunks to allocate. */
+ __u32 initial_chunk_count;
+
+ /** @chunk_size: Chunk size. Must be a power of two and at least 256KB. */
+ __u32 chunk_size;
+
+ /** @max_chunks: Maximum number of chunks that can be allocated. */
+ __u32 max_chunks;
+
+ /**
+ * @target_in_flight: Maximum number of in-flight render passes.
+ *
+ * If the heap has more than @target_in_flight tiler jobs in-flight, the FW
+ * will wait for render passes to finish before queuing new tiler jobs.
+ */
+ __u32 target_in_flight;
+
+ /** @handle: Returned heap handle. Passed back to DESTROY_TILER_HEAP. */
+ __u32 handle;
+
+ /** @tiler_heap_ctx_gpu_va: Returned GPU virtual address of the heap context. */
+ __u64 tiler_heap_ctx_gpu_va;
+
+ /**
+ * @first_heap_chunk_gpu_va: GPU virtual address of the first heap chunk.
+ *
+ * The tiler heap is formed of heap chunks linked in a singly-linked list;
+ * this is the first element in the list.
+ */
+ __u64 first_heap_chunk_gpu_va;
+};
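A hedged sketch with arbitrary illustrative sizing (2MB chunks, up to 64 of them); vm_id is an existing VM:

    struct drm_panthor_tiler_heap_create heap = {
            .vm_id = vm_id,
            .initial_chunk_count = 2,
            .chunk_size = 2 * 1024 * 1024, /* power of two, >= 256KB */
            .max_chunks = 64,
            .target_in_flight = 65535,
    };

    drmIoctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &heap);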
+
+/**
+ * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
+ */
+struct drm_panthor_tiler_heap_destroy {
+ /** @handle: Handle of the tiler heap to destroy */
+ __u32 handle;
+
+ /** @pad: Padding field, MBZ. */
+ __u32 pad;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _PANTHOR_DRM_H_ */
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 538a3ac95c54..1446c3bae515 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -459,8 +459,16 @@ struct drm_xe_gt {
* by struct drm_xe_query_mem_regions' mem_class.
*/
__u64 far_mem_regions;
+ /** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */
+ __u16 ip_ver_major;
+ /** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */
+ __u16 ip_ver_minor;
+ /** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */
+ __u16 ip_ver_rev;
+ /** @pad2: MBZ */
+ __u16 pad2;
/** @reserved: Reserved */
- __u64 reserved[8];
+ __u64 reserved[7];
};
/**
@@ -510,9 +518,9 @@ struct drm_xe_query_topology_mask {
/** @gt_id: GT ID the mask is associated with */
__u16 gt_id;
-#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
-#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
-#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
+#define DRM_XE_TOPO_DSS_GEOMETRY 1
+#define DRM_XE_TOPO_DSS_COMPUTE 2
+#define DRM_XE_TOPO_EU_PER_DSS 4
/** @type: type of mask */
__u16 type;
@@ -583,6 +591,7 @@ struct drm_xe_query_engine_cycles {
struct drm_xe_query_uc_fw_version {
/** @uc_type: The micro-controller type to query firmware version */
#define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0
+#define XE_QUERY_UC_TYPE_HUC 1
__u16 uc_type;
/** @pad: MBZ */
@@ -862,6 +871,12 @@ struct drm_xe_vm_destroy {
* - %DRM_XE_VM_BIND_OP_PREFETCH
*
* and the @flags can be:
+ * - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
+ * to ensure write protection
+ * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
+ * MAP operation immediately rather than deferring the MAP to the page
+ * fault handler. This is implied on a non-faulting VM as there is no
+ * fault handler to defer to.
* - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
* tables are setup with a special bit which indicates writes are
* dropped and all reads return zero. In the future, the NULL flags
@@ -954,6 +969,8 @@ struct drm_xe_vm_bind_op {
/** @op: Bind operation to perform */
__u32 op;
+#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
/** @flags: Bind flags */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 9ce46edc62a5..2040a470ddb4 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -913,14 +913,25 @@ enum kfd_dbg_trap_exception_code {
KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \
KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
+#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))
/* Checks for exception code types for KFD search */
+#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \
- (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
+ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \
- (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
+ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \
- (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
+ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
+#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) \
+ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
/* Runtime enable states */
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 43c51698195c..842bf1201ac4 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -57,7 +57,7 @@ enum vdpa_attr {
VDPA_ATTR_DEV_FEATURES, /* u64 */
VDPA_ATTR_DEV_BLK_CFG_CAPACITY, /* u64 */
- VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_SEG_MAX, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES, /* u16 */
@@ -70,8 +70,8 @@ enum vdpa_attr {
VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN,/* u32 */
VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG, /* u32 */
- VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, /* u8 */
- VDPA_ATTR_DEV_BLK_CFG_FLUSH, /* u8 */
+ VDPA_ATTR_DEV_BLK_READ_ONLY, /* u8 */
+ VDPA_ATTR_DEV_BLK_FLUSH, /* u8 */
/* new attributes must be added above here */
VDPA_ATTR_MAX,
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index bea697390613..b95dd84eef2d 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -179,12 +179,6 @@
/* Get the config size */
#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
-/* Get the count of all virtqueues */
-#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
-
-/* Get the number of virtqueue groups. */
-#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
-
/* Get the number of address spaces. */
#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
@@ -228,10 +222,17 @@
#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \
struct vhost_vring_state)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
+
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
+
/* Get the queue size of a specific virtqueue.
* userspace set the vring index in vhost_vring_state.index
* kernel set the queue size in vhost_vring_state.num
*/
-#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x80, \
+#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x82, \
struct vhost_vring_state)
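A hedged usage sketch for the renumbered ioctl; vdpa_fd is assumed to be an open /dev/vhost-vdpa-N descriptor:

    struct vhost_vring_state s = { .index = 0 };

    if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_SIZE, &s) == 0)
            printf("vq0 has %u entries\n", s.num);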
#endif
diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
index c72ce387286a..30a5c1a59376 100644
--- a/include/uapi/scsi/scsi_bsg_mpi3mr.h
+++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h
@@ -382,7 +382,7 @@ struct mpi3mr_bsg_in_reply_buf {
__u8 mpi_reply_type;
__u8 rsvd1;
__u16 rsvd2;
- __u8 reply_buf[1];
+ __u8 reply_buf[];
};
/**
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index cb2afcebbdf5..a35e12f8e68b 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -328,6 +328,7 @@ struct ufs_pwr_mode_info {
* @op_runtime_config: called to config Operation and runtime regs Pointers
* @get_outstanding_cqs: called to get outstanding completion queues
* @config_esi: called to config Event Specific Interrupt
+ * @config_scsi_dev: called to configure SCSI device parameters
*/
struct ufs_hba_variant_ops {
const char *name;
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index 5d5c0b8efff2..c71ddb6d4691 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -19,12 +19,6 @@
#include <vdso/time32.h>
#include <vdso/time64.h>
-#ifdef CONFIG_ARM64
-#include <asm/page-def.h>
-#else
-#include <asm/page.h>
-#endif
-
#ifdef CONFIG_ARCH_HAS_VDSO_DATA
#include <asm/vdso/data.h>
#else
@@ -132,7 +126,7 @@ extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden
*/
union vdso_data_store {
struct vdso_data data[CS_BASES];
- u8 page[PAGE_SIZE];
+ u8 page[1U << CONFIG_PAGE_SHIFT];
};
/*
diff --git a/init/Kconfig b/init/Kconfig
index aa02aec6aa7d..664bedb9a71f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1899,11 +1899,11 @@ config RUST
bool "Rust support"
depends on HAVE_RUST
depends on RUST_IS_AVAILABLE
+ depends on !CFI_CLANG
depends on !MODVERSIONS
depends on !GCC_PLUGINS
depends on !RANDSTRUCT
depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
- select CONSTRUCTORS
help
Enables Rust support in the kernel.
diff --git a/init/initramfs.c b/init/initramfs.c
index da79760b8be3..a298a3854a80 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -367,7 +367,7 @@ static int __init do_name(void)
if (S_ISREG(mode)) {
int ml = maybe_link();
if (ml >= 0) {
- int openflags = O_WRONLY|O_CREAT;
+ int openflags = O_WRONLY|O_CREAT|O_LARGEFILE;
if (ml != 1)
openflags |= O_TRUNC;
wfile = filp_open(collected, openflags, mode);
@@ -682,7 +682,7 @@ static void __init populate_initrd_image(char *err)
printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
err);
- file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
+ file = filp_open("/initrd.image", O_WRONLY|O_CREAT|O_LARGEFILE, 0700);
if (IS_ERR(file))
return;
diff --git a/init/main.c b/init/main.c
index 2ca52474d0c3..5dcf5274c09c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -487,6 +487,11 @@ static int __init warn_bootconfig(char *str)
early_param("bootconfig", warn_bootconfig);
+bool __init cmdline_has_extra_options(void)
+{
+ return extra_command_line || extra_init_args;
+}
+
/* Change NUL term back to "=", to make "param" the whole string. */
static void __init repair_env_string(char *param, char *val)
{
@@ -631,6 +636,8 @@ static void __init setup_command_line(char *command_line)
if (!saved_command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
+ len = xlen + strlen(command_line) + 1;
+
static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
if (!static_command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 5d4b448fdc50..c170a2b8d2cf 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -147,6 +147,7 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
static void io_queue_sqe(struct io_kiocb *req);
struct kmem_cache *req_cachep;
+static struct workqueue_struct *iou_wq __ro_after_init;
static int __read_mostly sysctl_io_uring_disabled;
static int __read_mostly sysctl_io_uring_group = -1;
@@ -350,7 +351,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
err:
kfree(ctx->cancel_table.hbs);
kfree(ctx->cancel_table_locked.hbs);
- kfree(ctx->io_bl);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
return NULL;
@@ -1982,10 +1982,15 @@ fail:
err = -EBADFD;
if (!io_file_can_poll(req))
goto fail;
- err = -ECANCELED;
- if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
- goto fail;
- return;
+ if (req->file->f_flags & O_NONBLOCK ||
+ req->file->f_mode & FMODE_NOWAIT) {
+ err = -ECANCELED;
+ if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
+ goto fail;
+ return;
+ } else {
+ req->flags &= ~REQ_F_APOLL_MULTISHOT;
+ }
}
if (req->flags & REQ_F_FORCE_ASYNC) {
@@ -2597,19 +2602,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
if (__io_cqring_events_user(ctx) >= min_events)
return 0;
- if (sig) {
-#ifdef CONFIG_COMPAT
- if (in_compat_syscall())
- ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
- sigsz);
- else
-#endif
- ret = set_user_sigmask(sig, sigsz);
-
- if (ret)
- return ret;
- }
-
init_waitqueue_func_entry(&iowq.wq, io_wake_function);
iowq.wq.private = current;
INIT_LIST_HEAD(&iowq.wq.entry);
@@ -2628,6 +2620,19 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
io_napi_adjust_timeout(ctx, &iowq, &ts);
}
+ if (sig) {
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+ sigsz);
+ else
+#endif
+ ret = set_user_sigmask(sig, sigsz);
+
+ if (ret)
+ return ret;
+ }
+
io_napi_busy_loop(ctx, &iowq);
trace_io_uring_cqring_wait(ctx, min_events);
@@ -2926,7 +2931,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_napi_free(ctx);
kfree(ctx->cancel_table.hbs);
kfree(ctx->cancel_table_locked.hbs);
- kfree(ctx->io_bl);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
}
@@ -3161,7 +3165,7 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
* noise and overhead, there's no discernable change in runtime
* over using system_wq.
*/
- queue_work(system_unbound_wq, &ctx->exit_work);
+ queue_work(iou_wq, &ctx->exit_work);
}
static int io_uring_release(struct inode *inode, struct file *file)
@@ -3443,14 +3447,15 @@ static void *io_uring_validate_mmap_request(struct file *file,
ptr = ctx->sq_sqes;
break;
case IORING_OFF_PBUF_RING: {
+ struct io_buffer_list *bl;
unsigned int bgid;
bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
- rcu_read_lock();
- ptr = io_pbuf_get_address(ctx, bgid);
- rcu_read_unlock();
- if (!ptr)
- return ERR_PTR(-EINVAL);
+ bl = io_pbuf_get_bl(ctx, bgid);
+ if (IS_ERR(bl))
+ return bl;
+ ptr = bl->buf_ring;
+ io_put_bl(ctx, bl);
break;
}
default:
@@ -4185,6 +4190,8 @@ static int __init io_uring_init(void)
io_buf_cachep = KMEM_CACHE(io_buffer,
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
+ iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
+
#ifdef CONFIG_SYSCTL
register_sysctl_init("kernel", kernel_io_uring_disabled_table);
#endif
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 693c26da4ee1..3aa16e27f509 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -17,8 +17,6 @@
#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
-#define BGID_ARRAY 64
-
/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)
@@ -40,13 +38,9 @@ struct io_buf_free {
int inuse;
};
-static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
- struct io_buffer_list *bl,
- unsigned int bgid)
+static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+ unsigned int bgid)
{
- if (bl && bgid < BGID_ARRAY)
- return &bl[bgid];
-
return xa_load(&ctx->io_bl_xa, bgid);
}
@@ -55,7 +49,7 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
{
lockdep_assert_held(&ctx->uring_lock);
- return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
+ return __io_buffer_get_list(ctx, bgid);
}
static int io_buffer_add_list(struct io_ring_ctx *ctx,
@@ -67,11 +61,7 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
* always under the ->uring_lock, but the RCU lookup from mmap does.
*/
bl->bgid = bgid;
- smp_store_release(&bl->is_ready, 1);
-
- if (bgid < BGID_ARRAY)
- return 0;
-
+ atomic_set(&bl->refs, 1);
return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
@@ -208,24 +198,6 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
return ret;
}
-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
-{
- struct io_buffer_list *bl;
- int i;
-
- bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
- if (!bl)
- return -ENOMEM;
-
- for (i = 0; i < BGID_ARRAY; i++) {
- INIT_LIST_HEAD(&bl[i].buf_list);
- bl[i].bgid = i;
- }
-
- smp_store_release(&ctx->io_bl, bl);
- return 0;
-}
-
/*
* Mark the given mapped range as free for reuse
*/
@@ -294,24 +266,24 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
return i;
}
+void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+{
+ if (atomic_dec_and_test(&bl->refs)) {
+ __io_remove_buffers(ctx, bl, -1U);
+ kfree_rcu(bl, rcu);
+ }
+}
+
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
struct io_buffer_list *bl;
struct list_head *item, *tmp;
struct io_buffer *buf;
unsigned long index;
- int i;
-
- for (i = 0; i < BGID_ARRAY; i++) {
- if (!ctx->io_bl)
- break;
- __io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
- }
xa_for_each(&ctx->io_bl_xa, index, bl) {
xa_erase(&ctx->io_bl_xa, bl->bgid);
- __io_remove_buffers(ctx, bl, -1U);
- kfree_rcu(bl, rcu);
+ io_put_bl(ctx, bl);
}
/*
@@ -489,12 +461,6 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
io_ring_submit_lock(ctx, issue_flags);
- if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
- ret = io_init_bl_list(ctx);
- if (ret)
- goto err;
- }
-
bl = io_buffer_get_list(ctx, p->bgid);
if (unlikely(!bl)) {
bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
@@ -507,14 +473,9 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
if (ret) {
/*
* Doesn't need rcu free as it was never visible, but
- * let's keep it consistent throughout. Also can't
- * be a lower indexed array group, as adding one
- * where lookup failed cannot happen.
+ * let's keep it consistent throughout.
*/
- if (p->bgid >= BGID_ARRAY)
- kfree_rcu(bl, rcu);
- else
- WARN_ON_ONCE(1);
+ kfree_rcu(bl, rcu);
goto err;
}
}
@@ -679,12 +640,6 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
if (reg.ring_entries >= 65536)
return -EINVAL;
- if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
- int ret = io_init_bl_list(ctx);
- if (ret)
- return ret;
- }
-
bl = io_buffer_get_list(ctx, reg.bgid);
if (bl) {
/* if mapped buffer ring OR classic exists, don't allow */
@@ -733,11 +688,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
if (!bl->is_buf_ring)
return -EINVAL;
- __io_remove_buffers(ctx, bl, -1U);
- if (bl->bgid >= BGID_ARRAY) {
- xa_erase(&ctx->io_bl_xa, bl->bgid);
- kfree_rcu(bl, rcu);
- }
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
+ io_put_bl(ctx, bl);
return 0;
}
@@ -767,23 +719,35 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
return 0;
}
-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
+struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
+ unsigned long bgid)
{
struct io_buffer_list *bl;
+ bool ret;
- bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
-
- if (!bl || !bl->is_mmap)
- return NULL;
/*
- * Ensure the list is fully setup. Only strictly needed for RCU lookup
- * via mmap, and in that case only for the array indexed groups. For
- * the xarray lookups, it's either visible and ready, or not at all.
+ * We have to be a bit careful here - we're inside mmap and cannot grab
+ * the uring_lock. This means the buffer_list could be simultaneously
+ * going away, if someone is trying to be sneaky. Look it up under rcu
+ * so we know it's not going away, and attempt to grab a reference to
+ * it. If the ref is already zero, then fail the mapping. If successful,
+ * the caller will call io_put_bl() to drop the reference at the
+ * end. This may then safely free the buffer_list (and drop the pages)
+ * at that point, vm_insert_pages() would've already grabbed the
+ * necessary vma references.
*/
- if (!smp_load_acquire(&bl->is_ready))
- return NULL;
-
- return bl->buf_ring;
+ rcu_read_lock();
+ bl = xa_load(&ctx->io_bl_xa, bgid);
+ /* must be a mmap'able buffer ring and have pages */
+ ret = false;
+ if (bl && bl->is_mmap)
+ ret = atomic_inc_not_zero(&bl->refs);
+ rcu_read_unlock();
+
+ if (ret)
+ return bl;
+
+ return ERR_PTR(-EINVAL);
}
/*
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 1c7b654ee726..df365b8860cf 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -25,12 +25,12 @@ struct io_buffer_list {
__u16 head;
__u16 mask;
+ atomic_t refs;
+
/* ring mapped provided buffers */
__u8 is_buf_ring;
/* ring mapped provided buffers, but mmap'ed by application */
__u8 is_mmap;
- /* bl is visible from an RCU point of view for lookup */
- __u8 is_ready;
};
struct io_buffer {
@@ -61,7 +61,9 @@ void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
+void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
+struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
+ unsigned long bgid);
static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
diff --git a/io_uring/net.c b/io_uring/net.c
index 1e7665ff6ef7..4afb475d4197 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1276,6 +1276,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (req_has_async_data(req)) {
kmsg = req->async_data;
+ kmsg->msg.msg_control_user = sr->msg_control;
} else {
ret = io_sendmsg_copy_hdr(req, &iomsg);
if (ret)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 0585ebcc9773..c8d48287439e 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -937,6 +937,13 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
ret = __io_read(req, issue_flags);
/*
+ * If the file doesn't support proper NOWAIT, then disable multishot
+ * and stay in single shot mode.
+ */
+ if (!io_file_supports_nowait(req))
+ req->flags &= ~REQ_F_APOLL_MULTISHOT;
+
+ /*
* If we get -EAGAIN, recycle our buffer and just let normal poll
* handling arm it.
*/
@@ -955,7 +962,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
/*
* Any successful return value will keep the multishot read armed.
*/
- if (ret > 0) {
+ if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
/*
* Put our buffer and post a CQE. If we fail to post a CQE, then
* jump to the termination path. This request is then done.
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 368c5d86b5b7..e497011261b8 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -4,7 +4,7 @@ ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y)
# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details
cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
endif
-CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
+CFLAGS_core.o += -Wno-override-init $(cflags-nogcse-yy)
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o token.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 86571e760dd6..343c3456c8dd 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -38,7 +38,7 @@
/* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
#define GUARD_SZ (1ull << sizeof(((struct bpf_insn *)0)->off) * 8)
-#define KERN_VM_SZ ((1ull << 32) + GUARD_SZ)
+#define KERN_VM_SZ (SZ_4G + GUARD_SZ)
struct bpf_arena {
struct bpf_map map;
@@ -110,7 +110,7 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
return ERR_PTR(-EINVAL);
vm_range = (u64)attr->max_entries * PAGE_SIZE;
- if (vm_range > (1ull << 32))
+ if (vm_range > SZ_4G)
return ERR_PTR(-E2BIG);
if ((attr->map_extra >> 32) != ((attr->map_extra + vm_range - 1) >> 32))
@@ -301,7 +301,7 @@ static unsigned long arena_get_unmapped_area(struct file *filp, unsigned long ad
if (pgoff)
return -EINVAL;
- if (len > (1ull << 32))
+ if (len > SZ_4G)
return -E2BIG;
/* if user_vm_start was specified at arena creation time */
@@ -322,7 +322,7 @@ static unsigned long arena_get_unmapped_area(struct file *filp, unsigned long ad
if (WARN_ON_ONCE(arena->user_vm_start))
/* checks at map creation time should prevent this */
return -EFAULT;
- return round_up(ret, 1ull << 32);
+ return round_up(ret, SZ_4G);
}
static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
@@ -346,7 +346,7 @@ static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
return -EBUSY;
/* Earlier checks should prevent this */
- if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > (1ull << 32) || vma->vm_pgoff))
+ if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > SZ_4G || vma->vm_pgoff))
return -EFAULT;
if (remember_vma(arena, vma))
@@ -420,7 +420,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
if (uaddr & ~PAGE_MASK)
return 0;
pgoff = compute_pgoff(arena, uaddr);
- if (pgoff + page_cnt > page_cnt_max)
+ if (pgoff > page_cnt_max - page_cnt)
/* requested address will be outside of user VMA */
return 0;
}
@@ -447,7 +447,13 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
goto out;
uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE);
- /* Earlier checks make sure that uaddr32 + page_cnt * PAGE_SIZE will not overflow 32-bit */
+ /* Earlier checks made sure that uaddr32 + page_cnt * PAGE_SIZE - 1
+ * will not overflow 32-bit. The lower 32 bits need to represent a
+ * contiguous user address range.
+ * Map these pages at the kern_vm_start base.
+ * kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE - 1 can overflow the
+ * lower 32 bits, and that's ok.
+ */
ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32,
kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages);
if (ret) {
@@ -510,6 +516,11 @@ static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
if (!page)
continue;
if (page_cnt == 1 && page_mapped(page)) /* mapped by some user process */
+ /* Optimization for the common case of page_cnt==1:
+ * If page wasn't mapped into some user vma there
+ * is no need to call zap_pages which is slow. When
+ * page_cnt is big it's faster to do the batched zap.
+ */
zap_pages(arena, full_uaddr, 1);
vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE);
__free_page(page);
diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c
index addf3dd57b59..35e1ddca74d2 100644
--- a/kernel/bpf/bloom_filter.c
+++ b/kernel/bpf/bloom_filter.c
@@ -80,6 +80,18 @@ static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key
return -EOPNOTSUPP;
}
+/* Called from syscall */
+static int bloom_map_alloc_check(union bpf_attr *attr)
+{
+ if (attr->value_size > KMALLOC_MAX_SIZE)
+ /* if value_size is bigger, the user space won't be able to
+ * access the elements.
+ */
+ return -E2BIG;
+
+ return 0;
+}
+
static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
{
u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
@@ -191,6 +203,7 @@ static u64 bloom_map_mem_usage(const struct bpf_map *map)
BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
const struct bpf_map_ops bloom_filter_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
+ .map_alloc_check = bloom_map_alloc_check,
.map_alloc = bloom_map_alloc,
.map_free = bloom_map_free,
.map_get_next_key = bloom_map_get_next_key,
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index a89587859571..449b9a5d3fe3 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2548,7 +2548,7 @@ __bpf_kfunc void bpf_throw(u64 cookie)
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(generic_btf_ids)
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ae2ff73bde7e..c287925471f6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3024,17 +3024,46 @@ void bpf_link_inc(struct bpf_link *link)
atomic64_inc(&link->refcnt);
}
+static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
+{
+ struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
+
+ /* free bpf_link and its containing memory */
+ link->ops->dealloc_deferred(link);
+}
+
+static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
+{
+ if (rcu_trace_implies_rcu_gp())
+ bpf_link_defer_dealloc_rcu_gp(rcu);
+ else
+ call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
+}
+
/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
+ bool sleepable = false;
+
bpf_link_free_id(link->id);
if (link->prog) {
+ sleepable = link->prog->sleepable;
/* detach BPF program, clean up used resources */
link->ops->release(link);
bpf_prog_put(link->prog);
}
- /* free bpf_link and its containing memory */
- link->ops->dealloc(link);
+ if (link->ops->dealloc_deferred) {
+ /* schedule BPF link deallocation; if underlying BPF program
+ * is sleepable, we need to first wait for RCU tasks trace
+ * sync, then go through "classic" RCU grace period
+ */
+ if (sleepable)
+ call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
+ else
+ call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
+ }
+ if (link->ops->dealloc)
+ link->ops->dealloc(link);
}
static void bpf_link_put_deferred(struct work_struct *work)
@@ -3544,7 +3573,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
static const struct bpf_link_ops bpf_raw_tp_link_lops = {
.release = bpf_raw_tp_link_release,
- .dealloc = bpf_raw_tp_link_dealloc,
+ .dealloc_deferred = bpf_raw_tp_link_dealloc,
.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
.fill_link_info = bpf_raw_tp_link_fill_link_info,
};
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 63749ad5ac6b..98188379d5c7 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5682,6 +5682,13 @@ static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
return reg->type == PTR_TO_FLOW_KEYS;
}
+static bool is_arena_reg(struct bpf_verifier_env *env, int regno)
+{
+ const struct bpf_reg_state *reg = reg_state(env, regno);
+
+ return reg->type == PTR_TO_ARENA;
+}
+
static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
#ifdef CONFIG_NET
[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
@@ -6694,6 +6701,11 @@ static int check_stack_access_within_bounds(
err = check_stack_slot_within_bounds(env, min_off, state, type);
if (!err && max_off > 0)
err = -EINVAL; /* out of stack access into non-negative offsets */
+ if (!err && access_size < 0)
+ /* access_size should not be negative (or overflow an int); other checks
+ * along the way should have prevented such an access.
+ */
+ err = -EFAULT; /* invalid negative access size; integer overflow? */
if (err) {
if (tnum_is_const(reg->var_off)) {
@@ -7019,7 +7031,8 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
if (is_ctx_reg(env, insn->dst_reg) ||
is_pkt_reg(env, insn->dst_reg) ||
is_flow_key_reg(env, insn->dst_reg) ||
- is_sk_reg(env, insn->dst_reg)) {
+ is_sk_reg(env, insn->dst_reg) ||
+ is_arena_reg(env, insn->dst_reg)) {
verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
insn->dst_reg,
reg_type_str(env, reg_state(env, insn->dst_reg)->type));
@@ -14014,6 +14027,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
verbose(env, "addr_space_cast insn can only convert between address space 1 and 0\n");
return -EINVAL;
}
+ if (!env->prog->aux->arena) {
+ verbose(env, "addr_space_cast insn can only be used in a program that has an associated arena\n");
+ return -EINVAL;
+ }
} else {
if ((insn->off != 0 && insn->off != 8 && insn->off != 16 &&
insn->off != 32) || insn->imm) {
@@ -14046,8 +14063,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (insn->imm) {
/* off == BPF_ADDR_SPACE_CAST */
mark_reg_unknown(env, regs, insn->dst_reg);
- if (insn->imm == 1) /* cast from as(1) to as(0) */
+ if (insn->imm == 1) { /* cast from as(1) to as(0) */
dst_reg->type = PTR_TO_ARENA;
+ /* PTR_TO_ARENA is 32-bit */
+ dst_reg->subreg_def = env->insn_idx + 1;
+ }
} else if (insn->off == 0) {
/* case: R1 = R2
* copy register state to dest reg
@@ -18359,15 +18379,18 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
}
if (!env->prog->jit_requested) {
verbose(env, "JIT is required to use arena\n");
+ fdput(f);
return -EOPNOTSUPP;
}
if (!bpf_jit_supports_arena()) {
verbose(env, "JIT doesn't support arena\n");
+ fdput(f);
return -EOPNOTSUPP;
}
env->prog->aux->arena = (void *)map;
if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
verbose(env, "arena's user address must be set via map_extra or mmap()\n");
+ fdput(f);
return -EINVAL;
}
}
@@ -19601,8 +19624,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
(((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {
/* convert to 32-bit mov that clears upper 32-bit */
insn->code = BPF_ALU | BPF_MOV | BPF_X;
- /* clear off, so it's a normal 'wX = wY' from JIT pov */
+ /* clear off and imm, so it's a normal 'wX = wY' from JIT pov */
insn->off = 0;
+ insn->imm = 0;
} /* cast from as(0) to as(1) should be handled by JIT */
goto next_insn;
}
diff --git a/kernel/configs/hardening.config b/kernel/configs/hardening.config
index 7a5bbfc024b7..4b4cfcba3190 100644
--- a/kernel/configs/hardening.config
+++ b/kernel/configs/hardening.config
@@ -39,11 +39,12 @@ CONFIG_UBSAN=y
CONFIG_UBSAN_TRAP=y
CONFIG_UBSAN_BOUNDS=y
# CONFIG_UBSAN_SHIFT is not set
-# CONFIG_UBSAN_DIV_ZERO
-# CONFIG_UBSAN_UNREACHABLE
-# CONFIG_UBSAN_BOOL
-# CONFIG_UBSAN_ENUM
-# CONFIG_UBSAN_ALIGNMENT
+# CONFIG_UBSAN_DIV_ZERO is not set
+# CONFIG_UBSAN_UNREACHABLE is not set
+# CONFIG_UBSAN_SIGNED_WRAP is not set
+# CONFIG_UBSAN_BOOL is not set
+# CONFIG_UBSAN_ENUM is not set
+# CONFIG_UBSAN_ALIGNMENT is not set
# Sampling-based heap out-of-bounds and use-after-free detection.
CONFIG_KFENCE=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8f6affd051f7..63447eb85dab 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -3196,6 +3196,7 @@ void __init boot_cpu_hotplug_init(void)
this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
}
+#ifdef CONFIG_CPU_MITIGATIONS
/*
* These are used for a global "mitigations=" cmdline option for toggling
* optional CPU mitigations.
@@ -3206,8 +3207,7 @@ enum cpu_mitigations {
CPU_MITIGATIONS_AUTO_NOSMT,
};
-static enum cpu_mitigations cpu_mitigations __ro_after_init =
- CPU_MITIGATIONS_AUTO;
+static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
static int __init mitigations_parse_cmdline(char *arg)
{
@@ -3223,7 +3223,6 @@ static int __init mitigations_parse_cmdline(char *arg)
return 0;
}
-early_param("mitigations", mitigations_parse_cmdline);
/* mitigations=off */
bool cpu_mitigations_off(void)
@@ -3238,3 +3237,11 @@ bool cpu_mitigations_auto_nosmt(void)
return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
+#else
+static int __init mitigations_parse_cmdline(char *arg)
+{
+ pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
+ return 0;
+}
+#endif
+early_param("mitigations", mitigations_parse_cmdline);
diff --git a/kernel/crash_reserve.c b/kernel/crash_reserve.c
index bbb6c3cb00e4..066668799f75 100644
--- a/kernel/crash_reserve.c
+++ b/kernel/crash_reserve.c
@@ -366,8 +366,10 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
crashk_low_res.start = low_base;
crashk_low_res.end = low_base + low_size - 1;
+#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
insert_resource(&iomem_resource, &crashk_low_res);
#endif
+#endif
return 0;
}
@@ -448,8 +450,12 @@ retry:
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
+#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
+ insert_resource(&iomem_resource, &crashk_res);
+#endif
}
+#ifndef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
static __init int insert_crashkernel_resources(void)
{
if (crashk_res.start < crashk_res.end)
@@ -462,3 +468,4 @@ static __init int insert_crashkernel_resources(void)
}
early_initcall(insert_crashkernel_resources);
#endif
+#endif
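
An architecture opts in to the early insertion by defining the guard macro in its crash header; a hypothetical sketch (the header path is illustrative):

	/* arch/foo/include/asm/crash_reserve.h (illustrative opt-in) */
	#define HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY

With the macro defined, crashk_res and crashk_low_res go into iomem_resource straight from the reservation paths above; without it, the existing early_initcall keeps inserting them later.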
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 86fe172b5958..a5e0dfc44d24 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -69,11 +69,14 @@
* @alloc_size: Size of the allocated buffer.
* @list: The free list describing the number of free entries available
* from each index.
+ * @pad_slots: Number of preceding padding slots. Valid only in the first
+ * allocated non-padding slot.
*/
struct io_tlb_slot {
phys_addr_t orig_addr;
size_t alloc_size;
- unsigned int list;
+ unsigned short list;
+ unsigned short pad_slots;
};
static bool swiotlb_force_bounce;
@@ -287,6 +290,7 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
mem->nslabs - i);
mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
mem->slots[i].alloc_size = 0;
+ mem->slots[i].pad_slots = 0;
}
memset(vaddr, 0, bytes);
@@ -821,12 +825,30 @@ void swiotlb_dev_init(struct device *dev)
#endif
}
-/*
- * Return the offset into a iotlb slot required to keep the device happy.
+/**
+ * swiotlb_align_offset() - Get required offset into an IO TLB allocation.
+ * @dev: Owning device.
+ * @align_mask: Allocation alignment mask.
+ * @addr: DMA address.
+ *
+ * Return the minimum offset from the start of an IO TLB allocation which is
+ * required for a given buffer address and allocation alignment to keep the
+ * device happy.
+ *
+ * First, the address bits covered by min_align_mask must be identical in the
+ * original address and the bounce buffer address. High bits are preserved by
+ * choosing a suitable IO TLB slot, but bits below IO_TLB_SHIFT require extra
+ * padding bytes before the bounce buffer.
+ *
+ * Second, @align_mask specifies which bits of the first allocated slot must
+ * be zero. This may require allocating additional padding slots, and then the
+ * offset (in bytes) from the first such padding slot is returned.
*/
-static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
+static unsigned int swiotlb_align_offset(struct device *dev,
+ unsigned int align_mask, u64 addr)
{
- return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
+ return addr & dma_get_min_align_mask(dev) &
+ (align_mask | (IO_TLB_SIZE - 1));
}
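
A worked example may help; all numbers are assumed for illustration:

	/*
	 * IO_TLB_SIZE        = 0x800 (2 KiB slots, IO_TLB_SHIFT = 11)
	 * min_align_mask     = 0xfff (device: preserve low 12 address bits)
	 * alloc_align_mask   = 0xfff (allocation must start 4 KiB aligned)
	 * orig_addr low bits = 0xc40
	 *
	 * offset    = 0xc40 & 0xfff & (0xfff | 0x7ff) = 0xc40
	 * pad_slots = 0xc40 >> IO_TLB_SHIFT = 1  (one whole padding slot)
	 * offset   &= IO_TLB_SIZE - 1 = 0x440
	 *
	 * The mapping burns one padding slot and starts 0x440 bytes into
	 * the first non-padding slot, which is exactly what the pad_slots
	 * bookkeeping in swiotlb_tbl_map_single() below records for
	 * swiotlb_release_slots() to undo.
	 */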
/*
@@ -841,27 +863,23 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
size_t alloc_size = mem->slots[index].alloc_size;
unsigned long pfn = PFN_DOWN(orig_addr);
unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
- unsigned int tlb_offset, orig_addr_offset;
+ int tlb_offset;
if (orig_addr == INVALID_PHYS_ADDR)
return;
- tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
- orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
- if (tlb_offset < orig_addr_offset) {
- dev_WARN_ONCE(dev, 1,
- "Access before mapping start detected. orig offset %u, requested offset %u.\n",
- orig_addr_offset, tlb_offset);
- return;
- }
-
- tlb_offset -= orig_addr_offset;
- if (tlb_offset > alloc_size) {
- dev_WARN_ONCE(dev, 1,
- "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
- alloc_size, size, tlb_offset);
- return;
- }
+ /*
+ * It's valid for tlb_offset to be negative. This can happen when the
+ * "offset" returned by swiotlb_align_offset() is non-zero, and the
+ * tlb_addr is pointing within the first "offset" bytes of the second
+ * or subsequent slots of the allocated swiotlb area. While it's not
+ * valid for tlb_addr to be pointing within the first "offset" bytes
+ * of the first slot, there's no way to check for such an error since
+ * this function can't distinguish the first slot from the second and
+ * subsequent slots.
+ */
+ tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
+ swiotlb_align_offset(dev, 0, orig_addr);
orig_addr += tlb_offset;
alloc_size -= tlb_offset;
@@ -1005,7 +1023,7 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
unsigned long max_slots = get_max_slots(boundary_mask);
unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
unsigned int nslots = nr_slots(alloc_size), stride;
- unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+ unsigned int offset = swiotlb_align_offset(dev, 0, orig_addr);
unsigned int index, slots_checked, count = 0, i;
unsigned long flags;
unsigned int slot_base;
@@ -1328,11 +1346,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
unsigned long attrs)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
- unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+ unsigned int offset;
struct io_tlb_pool *pool;
unsigned int i;
int index;
phys_addr_t tlb_addr;
+ unsigned short pad_slots;
if (!mem || !mem->nslabs) {
dev_warn_ratelimited(dev,
@@ -1349,6 +1368,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
return (phys_addr_t)DMA_MAPPING_ERROR;
}
+ offset = swiotlb_align_offset(dev, alloc_align_mask, orig_addr);
index = swiotlb_find_slots(dev, orig_addr,
alloc_size + offset, alloc_align_mask, &pool);
if (index == -1) {
@@ -1364,6 +1384,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
+ pad_slots = offset >> IO_TLB_SHIFT;
+ offset &= (IO_TLB_SIZE - 1);
+ index += pad_slots;
+ pool->slots[index].pad_slots = pad_slots;
for (i = 0; i < nr_slots(alloc_size + offset); i++)
pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
tlb_addr = slot_addr(pool->start, index) + offset;
@@ -1384,13 +1408,17 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
unsigned long flags;
- unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
- int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
- int nslots = nr_slots(mem->slots[index].alloc_size + offset);
- int aindex = index / mem->area_nslabs;
- struct io_tlb_area *area = &mem->areas[aindex];
+ unsigned int offset = swiotlb_align_offset(dev, 0, tlb_addr);
+ int index, nslots, aindex;
+ struct io_tlb_area *area;
int count, i;
+ index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
+ index -= mem->slots[index].pad_slots;
+ nslots = nr_slots(mem->slots[index].alloc_size + offset);
+ aindex = index / mem->area_nslabs;
+ area = &mem->areas[aindex];
+
/*
* Return the buffer to the free list by setting the corresponding
* entries to indicate the number of contiguous entries available.
@@ -1413,6 +1441,7 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
mem->slots[i].list = ++count;
mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
mem->slots[i].alloc_size = 0;
+ mem->slots[i].pad_slots = 0;
}
/*
@@ -1647,9 +1676,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
const char *dirname)
{
- atomic_long_set(&mem->total_used, 0);
- atomic_long_set(&mem->used_hiwater, 0);
-
mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
if (!mem->nslabs)
return;
@@ -1660,7 +1686,6 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
&fops_io_tlb_hiwater);
#ifdef CONFIG_SWIOTLB_DYNAMIC
- atomic_long_set(&mem->transient_nslabs, 0);
debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs,
mem, &fops_io_tlb_transient_used);
#endif
diff --git a/kernel/fork.c b/kernel/fork.c
index 39a5046c2f0b..aebb3e6c96dc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -714,6 +714,23 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
} else if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
vm_flags_clear(tmp, VM_LOCKED_MASK);
+ /*
+ * Copy/update hugetlb private vma information.
+ */
+ if (is_vm_hugetlb_page(tmp))
+ hugetlb_dup_vma_private(tmp);
+
+ /*
+ * Link the vma into the MT. After using __mt_dup(), memory
+ * allocation is not necessary here, so it cannot fail.
+ */
+ vma_iter_bulk_store(&vmi, tmp);
+
+ mm->map_count++;
+
+ if (tmp->vm_ops && tmp->vm_ops->open)
+ tmp->vm_ops->open(tmp);
+
file = tmp->vm_file;
if (file) {
struct address_space *mapping = file->f_mapping;
@@ -730,25 +747,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
i_mmap_unlock_write(mapping);
}
- /*
- * Copy/update hugetlb private vma information.
- */
- if (is_vm_hugetlb_page(tmp))
- hugetlb_dup_vma_private(tmp);
-
- /*
- * Link the vma into the MT. After using __mt_dup(), memory
- * allocation is not necessary here, so it cannot fail.
- */
- vma_iter_bulk_store(&vmi, tmp);
-
- mm->map_count++;
if (!(tmp->vm_flags & VM_WIPEONFORK))
retval = copy_page_range(tmp, mpnt);
- if (tmp->vm_ops && tmp->vm_ops->open)
- tmp->vm_ops->open(tmp);
-
if (retval) {
mpnt = vma_next(&vmi);
goto loop_out;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ad3eaf2ab959..bf9ae8a8686f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1643,8 +1643,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
}
if (!((old->flags & new->flags) & IRQF_SHARED) ||
- (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
- ((old->flags ^ new->flags) & IRQF_ONESHOT))
+ (oldtype != (new->flags & IRQF_TRIGGER_MASK)))
+ goto mismatch;
+
+ if ((old->flags & IRQF_ONESHOT) &&
+ (new->flags & IRQF_COND_ONESHOT))
+ new->flags |= IRQF_ONESHOT;
+ else if ((old->flags ^ new->flags) & IRQF_ONESHOT)
goto mismatch;
/* All handlers must agree on per-cpuness */
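
Drivers that can share a line with an existing oneshot user can now request it with the conditional flag; a hedged usage sketch (handler and device names hypothetical):

	/* If the line's first user registered with IRQF_ONESHOT, this
	 * request now inherits IRQF_ONESHOT instead of failing the
	 * flags match.
	 */
	ret = request_irq(irq, foo_handler,
			  IRQF_SHARED | IRQF_COND_ONESHOT,
			  "foo", foo_dev);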
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9d9095e81792..65adc815fc6e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1567,10 +1567,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
jump_label_lock();
preempt_disable();
- /* Ensure it is not in reserved area nor out of text */
- if (!(core_kernel_text((unsigned long) p->addr) ||
- is_module_text_address((unsigned long) p->addr)) ||
- in_gate_area_no_mm((unsigned long) p->addr) ||
+ /* Ensure the address is in a text area, and find a module if one exists. */
+ *probed_mod = NULL;
+ if (!core_kernel_text((unsigned long) p->addr)) {
+ *probed_mod = __module_text_address((unsigned long) p->addr);
+ if (!(*probed_mod)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ /* Ensure it is not in reserved area. */
+ if (in_gate_area_no_mm((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
@@ -1580,8 +1587,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
goto out;
}
- /* Check if 'p' is probing a module. */
- *probed_mod = __module_text_address((unsigned long) p->addr);
+ /* Get module refcount and reject __init functions for loaded modules. */
if (*probed_mod) {
/*
* We must hold a refcount of the probed module while updating
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
index c3ced519e14b..f3e0329337f6 100644
--- a/kernel/module/Kconfig
+++ b/kernel/module/Kconfig
@@ -236,6 +236,10 @@ choice
possible to load a signed module containing the algorithm to check
the signature on that module.
+config MODULE_SIG_SHA1
+ bool "Sign modules with SHA-1"
+ select CRYPTO_SHA1
+
config MODULE_SIG_SHA256
bool "Sign modules with SHA-256"
select CRYPTO_SHA256
@@ -265,6 +269,7 @@ endchoice
config MODULE_SIG_HASH
string
depends on MODULE_SIG || IMA_APPRAISE_MODSIG
+ default "sha1" if MODULE_SIG_SHA1
default "sha256" if MODULE_SIG_SHA256
default "sha384" if MODULE_SIG_SHA384
default "sha512" if MODULE_SIG_SHA512
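
Selecting the new entry resolves the hash string exactly like the existing SHA-2 options; an illustrative .config fragment:

	CONFIG_MODULE_SIG=y
	CONFIG_MODULE_SIG_SHA1=y
	# which yields, via the MODULE_SIG_HASH default chain:
	CONFIG_MODULE_SIG_HASH="sha1"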
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index e3ae93bbcb9b..09f8397bae15 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -106,6 +106,12 @@ static void s2idle_enter(void)
swait_event_exclusive(s2idle_wait_head,
s2idle_state == S2IDLE_STATE_WAKE);
+ /*
+ * Kick all CPUs to ensure that they resume their timers and restore
+ * consistent system state.
+ */
+ wake_up_all_idle_cpus();
+
cpus_read_unlock();
raw_spin_lock_irq(&s2idle_lock);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index ca5146006b94..adf99c05adca 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2009,6 +2009,12 @@ static int console_trylock_spinning(void)
*/
mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+ /*
+ * Update @console_may_schedule for trylock because the previous
+ * owner may have been schedulable.
+ */
+ console_may_schedule = 0;
+
return 1;
}
diff --git a/kernel/profile.c b/kernel/profile.c
index 8a77769bc4b4..2b775cc5c28f 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -344,49 +344,6 @@ void profile_tick(int type)
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
- return 0;
-}
-
-static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, prof_cpu_mask_proc_show, NULL);
-}
-
-static ssize_t prof_cpu_mask_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *pos)
-{
- cpumask_var_t new_value;
- int err;
-
- if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
- return -ENOMEM;
-
- err = cpumask_parse_user(buffer, count, new_value);
- if (!err) {
- cpumask_copy(prof_cpu_mask, new_value);
- err = count;
- }
- free_cpumask_var(new_value);
- return err;
-}
-
-static const struct proc_ops prof_cpu_mask_proc_ops = {
- .proc_open = prof_cpu_mask_proc_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_release = single_release,
- .proc_write = prof_cpu_mask_proc_write,
-};
-
-void create_prof_cpu_mask(void)
-{
- /* create /proc/irq/prof_cpu_mask */
- proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_ops);
-}
-
/*
* This function accesses profiling information. The returned data is
* binary: the sampling step and the actual contents of the profile
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 03be0d1330a6..c62805dbd608 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -696,15 +696,21 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
*
* XXX could add max_slice to the augmented data to track this.
*/
-static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static s64 entity_lag(u64 avruntime, struct sched_entity *se)
{
- s64 lag, limit;
+ s64 vlag, limit;
+
+ vlag = avruntime - se->vruntime;
+ limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+ return clamp(vlag, -limit, limit);
+}
+
+static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
SCHED_WARN_ON(!se->on_rq);
- lag = avg_vruntime(cfs_rq) - se->vruntime;
- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
- se->vlag = clamp(lag, -limit, limit);
+ se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
}
/*
@@ -3676,11 +3682,10 @@ static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif
-static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se,
+static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
unsigned long weight)
{
unsigned long old_weight = se->load.weight;
- u64 avruntime = avg_vruntime(cfs_rq);
s64 vlag, vslice;
/*
@@ -3761,7 +3766,7 @@ static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se,
* = V - vl'
*/
if (avruntime != se->vruntime) {
- vlag = (s64)(avruntime - se->vruntime);
+ vlag = entity_lag(avruntime, se);
vlag = div_s64(vlag * old_weight, weight);
se->vruntime = avruntime - vlag;
}
@@ -3787,25 +3792,26 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
bool curr = cfs_rq->curr == se;
+ u64 avruntime;
if (se->on_rq) {
/* commit outstanding execution time */
- if (curr)
- update_curr(cfs_rq);
- else
+ update_curr(cfs_rq);
+ avruntime = avg_vruntime(cfs_rq);
+ if (!curr)
__dequeue_entity(cfs_rq, se);
update_load_sub(&cfs_rq->load, se->load.weight);
}
dequeue_load_avg(cfs_rq, se);
- if (!se->on_rq) {
+ if (se->on_rq) {
+ reweight_eevdf(se, avruntime, weight);
+ } else {
/*
* Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
* we need to scale se->vlag when w_i changes.
*/
se->vlag = div_s64(se->vlag * se->load.weight, weight);
- } else {
- reweight_eevdf(cfs_rq, se, weight);
}
update_load_set(&se->load, weight);
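
The rescaling in both branches follows from keeping the weighted lag invariant across a weight change; illustrative arithmetic with assumed values:

	/*
	 * lag_i = w_i * (V - v_i), while se->vlag caches (V - v_i),
	 * so changing the weight must rescale the cached value:
	 *
	 *   w_old = 2048, w_new = 1024, se->vlag = 6
	 *   se->vlag = 6 * 2048 / 1024 = 12   (lag_i stays 12288)
	 */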
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 373d42c707bc..5891e715f00d 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -46,7 +46,16 @@ int housekeeping_any_cpu(enum hk_type type)
if (cpu < nr_cpu_ids)
return cpu;
- return cpumask_any_and(housekeeping.cpumasks[type], cpu_online_mask);
+ cpu = cpumask_any_and(housekeeping.cpumasks[type], cpu_online_mask);
+ if (likely(cpu < nr_cpu_ids))
+ return cpu;
+ /*
+ * Unless we have another problem, this can only happen
+ * at boot time before start_secondary() brings the 1st
+ * housekeeping CPU up.
+ */
+ WARN_ON_ONCE(system_state == SYSTEM_RUNNING ||
+ type != HK_TYPE_TIMER);
}
}
return smp_processor_id();
@@ -109,6 +118,7 @@ static void __init housekeeping_setup_type(enum hk_type type,
static int __init housekeeping_setup(char *str, unsigned long flags)
{
cpumask_var_t non_housekeeping_mask, housekeeping_staging;
+ unsigned int first_cpu;
int err = 0;
if ((flags & HK_FLAG_TICK) && !(housekeeping.flags & HK_FLAG_TICK)) {
@@ -129,7 +139,8 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
cpumask_andnot(housekeeping_staging,
cpu_possible_mask, non_housekeeping_mask);
- if (!cpumask_intersects(cpu_present_mask, housekeeping_staging)) {
+ first_cpu = cpumask_first_and(cpu_present_mask, housekeeping_staging);
+ if (first_cpu >= nr_cpu_ids || first_cpu >= setup_max_cpus) {
__cpumask_set_cpu(smp_processor_id(), housekeeping_staging);
__cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask);
if (!housekeeping.flags) {
@@ -138,6 +149,9 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
}
}
+ if (cpumask_empty(non_housekeeping_mask))
+ goto free_housekeeping_staging;
+
if (!housekeeping.flags) {
/* First setup call ("nohz_full=" or "isolcpus=") */
enum hk_type type;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d2242679239e..ae50f212775e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -79,6 +79,8 @@
# include <asm/paravirt_api_clock.h>
#endif
+#include <asm/barrier.h>
+
#include "cpupri.h"
#include "cpudeadline.h"
@@ -3445,13 +3447,19 @@ static inline void switch_mm_cid(struct rq *rq,
* between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
* Provide it here.
*/
- if (!prev->mm) // from kernel
+ if (!prev->mm) { // from kernel
smp_mb();
- /*
- * user -> user transition guarantees a memory barrier through
- * switch_mm() when current->mm changes. If current->mm is
- * unchanged, no barrier is needed.
- */
+ } else { // from user
+ /*
+ * user->user transition relies on an implicit
+ * memory barrier in switch_mm() when
+ * current->mm changes. If the architecture
+ * switch_mm() does not have an implicit memory
+ * barrier, it is emitted here. If current->mm
+ * is unchanged, no barrier is needed.
+ */
+ smp_mb__after_switch_mm();
+ }
}
if (prev->mm_cid_active) {
mm_cid_snapshot_time(rq, prev->mm);
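
The new smp_mb__after_switch_mm() presumably comes with an asm-generic fallback that defaults to a full barrier, letting architectures whose switch_mm() already implies one override it with a no-op; a sketch of the assumed shape (the header is outside this hunk):

	/* include/asm-generic/barrier.h (assumed fallback) */
	#ifndef smp_mb__after_switch_mm
	#define smp_mb__after_switch_mm()	smp_mb()
	#endif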
diff --git a/kernel/sys.c b/kernel/sys.c
index f8e543f1e38a..8bb106a56b3a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2408,8 +2408,11 @@ static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
return -EINVAL;
- /* PARISC cannot allow mdwe as it needs writable stacks */
- if (IS_ENABLED(CONFIG_PARISC))
+ /*
+ * EOPNOTSUPP might be more appropriate here in principle, but
+ * existing userspace depends on EINVAL specifically.
+ */
+ if (!arch_memory_deny_write_exec_supported())
return -EINVAL;
current_bits = get_current_mdwe();
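
arch_memory_deny_write_exec_supported() replaces the open-coded IS_ENABLED(CONFIG_PARISC) test; the generic fallback presumably reports support, and parisc, which needs writable stacks, overrides it. A sketch of the assumed shape:

	/* include/linux/mman.h (assumed generic fallback) */
	#ifndef arch_memory_deny_write_exec_supported
	static inline bool arch_memory_deny_write_exec_supported(void)
	{
		return true;
	}
	#endif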
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 9de66bbbb3d1..4782edcbe7b9 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -129,15 +129,17 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
goto out;
}
pccontext->clk = clk;
- fp->private_data = pccontext;
- if (clk->ops.open)
+ if (clk->ops.open) {
err = clk->ops.open(pccontext, fp->f_mode);
- else
- err = 0;
-
- if (!err) {
- get_device(clk->dev);
+ if (err) {
+ kfree(pccontext);
+ goto out;
+ }
}
+
+ fp->private_data = pccontext;
+ get_device(clk->dev);
+ err = 0;
out:
up_read(&clk->rwsem);
return err;
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index fb0fdec8719a..d88b13076b79 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -7,6 +7,7 @@
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
*/
+#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
@@ -84,7 +85,7 @@ int tick_is_oneshot_available(void)
*/
static void tick_periodic(int cpu)
{
- if (tick_do_timer_cpu == cpu) {
+ if (READ_ONCE(tick_do_timer_cpu) == cpu) {
raw_spin_lock(&jiffies_lock);
write_seqcount_begin(&jiffies_seq);
@@ -215,8 +216,8 @@ static void tick_setup_device(struct tick_device *td,
* If no cpu took the do_timer update, assign it to
* this cpu:
*/
- if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
- tick_do_timer_cpu = cpu;
+ if (READ_ONCE(tick_do_timer_cpu) == TICK_DO_TIMER_BOOT) {
+ WRITE_ONCE(tick_do_timer_cpu, cpu);
tick_next_period = ktime_get();
#ifdef CONFIG_NO_HZ_FULL
/*
@@ -232,7 +233,7 @@ static void tick_setup_device(struct tick_device *td,
!tick_nohz_full_cpu(cpu)) {
tick_take_do_timer_from_boot();
tick_do_timer_boot_cpu = -1;
- WARN_ON(tick_do_timer_cpu != cpu);
+ WARN_ON(READ_ONCE(tick_do_timer_cpu) != cpu);
#endif
}
@@ -406,10 +407,10 @@ void tick_assert_timekeeping_handover(void)
int tick_cpu_dying(unsigned int dying_cpu)
{
/*
- * If the current CPU is the timekeeper, it's the only one that
- * can safely hand over its duty. Also all online CPUs are in
- * stop machine, guaranteed not to be idle, therefore it's safe
- * to pick any online successor.
+ * If the current CPU is the timekeeper, it's the only one that can
+ * safely hand over its duty. Also all online CPUs are in stop
+ * machine, guaranteed not to be idle, therefore there is no
+ * concurrency and it's safe to pick any online successor.
*/
if (tick_do_timer_cpu == dying_cpu)
tick_do_timer_cpu = cpumask_first(cpu_online_mask);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 269e21590df5..71a792cd8936 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -8,6 +8,7 @@
*
* Started by: Thomas Gleixner and Ingo Molnar
*/
+#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
@@ -204,7 +205,7 @@ static inline void tick_sched_flag_clear(struct tick_sched *ts,
static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
- int cpu = smp_processor_id();
+ int tick_cpu, cpu = smp_processor_id();
/*
* Check if the do_timer duty was dropped. We don't care about
@@ -216,16 +217,18 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
* If nohz_full is enabled, this should not happen because the
* 'tick_do_timer_cpu' CPU never relinquishes.
*/
- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) &&
- unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
+ tick_cpu = READ_ONCE(tick_do_timer_cpu);
+
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && unlikely(tick_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
WARN_ON_ONCE(tick_nohz_full_running);
#endif
- tick_do_timer_cpu = cpu;
+ WRITE_ONCE(tick_do_timer_cpu, cpu);
+ tick_cpu = cpu;
}
/* Check if jiffies need an update */
- if (tick_do_timer_cpu == cpu)
+ if (tick_cpu == cpu)
tick_do_update_jiffies64(now);
/*
@@ -610,7 +613,7 @@ bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
* timers, workqueues, timekeeping, ...) on behalf of full dynticks
* CPUs. It must remain online when nohz full is enabled.
*/
- if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
+ if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu)
return false;
return true;
}
@@ -697,6 +700,7 @@ bool tick_nohz_tick_stopped_cpu(int cpu)
/**
* tick_nohz_update_jiffies - update jiffies when idle was interrupted
+ * @now: current ktime_t
*
* Called from interrupt entry when the CPU was idle
*
@@ -794,7 +798,7 @@ static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
* This time is measured via accounting rather than sampling,
* and is as accurate as ktime_get() is.
*
- * This function returns -1 if NOHZ is not enabled.
+ * Return: -1 if NOHZ is not enabled, else the total idle time of @cpu
*/
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
@@ -820,7 +824,7 @@ EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
* This time is measured via accounting rather than sampling,
* and is as accurate as ktime_get() is.
*
- * This function returns -1 if NOHZ is not enabled.
+ * Return: -1 if NOHZ is not enabled, else total iowait time of @cpu
*/
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
@@ -890,6 +894,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
u64 basemono, next_tick, delta, expires;
unsigned long basejiff;
+ int tick_cpu;
basemono = get_jiffies_update(&basejiff);
ts->last_jiffies = basejiff;
@@ -946,9 +951,9 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
* Otherwise we can sleep as long as we want.
*/
delta = timekeeping_max_deferment();
- if (cpu != tick_do_timer_cpu &&
- (tick_do_timer_cpu != TICK_DO_TIMER_NONE ||
- !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST)))
+ tick_cpu = READ_ONCE(tick_do_timer_cpu);
+ if (tick_cpu != cpu &&
+ (tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST)))
delta = KTIME_MAX;
/* Calculate the next expiry time */
@@ -969,6 +974,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
unsigned long basejiff = ts->last_jiffies;
u64 basemono = ts->timer_expires_base;
bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
+ int tick_cpu;
u64 expires;
/* Make sure we won't be trying to stop it twice in a row. */
@@ -1006,10 +1012,11 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
* do_timer() never gets invoked. Keep track of the fact that it
* was the one which had the do_timer() duty last.
*/
- if (cpu == tick_do_timer_cpu) {
- tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+ tick_cpu = READ_ONCE(tick_do_timer_cpu);
+ if (tick_cpu == cpu) {
+ WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE);
tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST);
- } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
+ } else if (tick_cpu != TICK_DO_TIMER_NONE) {
tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST);
}
@@ -1172,15 +1179,17 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
if (tick_nohz_full_enabled()) {
+ int tick_cpu = READ_ONCE(tick_do_timer_cpu);
+
/*
* Keep the tick alive to guarantee timekeeping progression
* if there are full dynticks CPUs around
*/
- if (tick_do_timer_cpu == cpu)
+ if (tick_cpu == cpu)
return false;
/* Should not happen for nohz-full */
- if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+ if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE))
return false;
}
@@ -1287,6 +1296,8 @@ void tick_nohz_irq_exit(void)
/**
* tick_nohz_idle_got_tick - Check whether or not the tick handler has run
+ *
+ * Return: %true if the tick handler has run, otherwise %false
*/
bool tick_nohz_idle_got_tick(void)
{
@@ -1305,6 +1316,8 @@ bool tick_nohz_idle_got_tick(void)
* stopped, it returns the next hrtimer.
*
* Called from power state control code with interrupts disabled
+ *
+ * Return: the next expiration time
*/
ktime_t tick_nohz_get_next_hrtimer(void)
{
@@ -1320,6 +1333,8 @@ ktime_t tick_nohz_get_next_hrtimer(void)
* The return value of this function and/or the value returned by it through the
* @delta_next pointer can be negative which must be taken into account by its
* callers.
+ *
+ * Return: the expected length of the current sleep
*/
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
@@ -1357,8 +1372,11 @@ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
/**
* tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
* for a particular CPU.
+ * @cpu: target CPU number
*
* Called from the schedutil frequency scaling governor in scheduler context.
+ *
+ * Return: the current idle calls counter value for @cpu
*/
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
@@ -1371,6 +1389,8 @@ unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
* tick_nohz_get_idle_calls - return the current idle calls counter value
*
* Called from the schedutil frequency scaling governor in scheduler context.
+ *
+ * Return: the current idle calls counter value for the current CPU
*/
unsigned long tick_nohz_get_idle_calls(void)
{
@@ -1559,7 +1579,7 @@ early_param("skew_tick", skew_tick);
/**
* tick_setup_sched_timer - setup the tick emulation timer
- * @mode: tick_nohz_mode to setup for
+ * @hrtimer: whether to use the hrtimer or not
*/
void tick_setup_sched_timer(bool hrtimer)
{
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index e11c4dc65bcb..b4a7822f495d 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -46,8 +46,8 @@ struct tick_device {
* @next_tick: Next tick to be fired when in dynticks mode.
* @idle_jiffies: jiffies at the entry to idle for idle time accounting
* @idle_waketime: Time when the idle was interrupted
+ * @idle_sleeptime_seq: sequence counter for data consistency
* @idle_entrytime: Time when the idle call was entered
- * @nohz_mode: Mode - one state of tick_nohz_mode
* @last_jiffies: Base jiffies snapshot when next event was last computed
* @timer_expires_base: Base time clock monotonic for @timer_expires
* @timer_expires: Anticipated timer expiration time (in case sched tick is stopped)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index dee29f1f5b75..3baf2fbe6848 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -64,15 +64,15 @@ EXPORT_SYMBOL(jiffies_64);
/*
* The timer wheel has LVL_DEPTH array levels. Each level provides an array of
- * LVL_SIZE buckets. Each level is driven by its own clock and therefor each
+ * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
* level has a different granularity.
*
- * The level granularity is: LVL_CLK_DIV ^ lvl
+ * The level granularity is: LVL_CLK_DIV ^ level
* The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
*
* The array level of a newly armed timer depends on the relative expiry
* time. The farther the expiry time is away the higher the array level and
- * therefor the granularity becomes.
+ * therefore the granularity becomes.
*
* Contrary to the original timer wheel implementation, which aims for 'exact'
* expiry of the timers, this implementation removes the need for recascading
@@ -207,7 +207,7 @@ EXPORT_SYMBOL(jiffies_64);
* struct timer_base - Per CPU timer base (number of base depends on config)
* @lock: Lock protecting the timer_base
* @running_timer: When expiring timers, the lock is dropped. To make
- * sure not to race agains deleting/modifying a
+ * sure not to race against deleting/modifying a
* currently running timer, the pointer is set to the
* timer, which expires at the moment. If no timer is
* running, the pointer is NULL.
@@ -737,7 +737,7 @@ static bool timer_is_static_object(void *addr)
}
/*
- * fixup_init is called when:
+ * timer_fixup_init is called when:
* - an active object is initialized
*/
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
@@ -761,7 +761,7 @@ static void stub_timer(struct timer_list *unused)
}
/*
- * fixup_activate is called when:
+ * timer_fixup_activate is called when:
* - an active object is activated
* - an unknown non-static object is activated
*/
@@ -783,7 +783,7 @@ static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
}
/*
- * fixup_free is called when:
+ * timer_fixup_free is called when:
* - an active object is freed
*/
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
@@ -801,7 +801,7 @@ static bool timer_fixup_free(void *addr, enum debug_obj_state state)
}
/*
- * fixup_assert_init is called when:
+ * timer_fixup_assert_init is called when:
* - an untracked/uninit-ed object is found
*/
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
@@ -914,7 +914,7 @@ static void do_init_timer(struct timer_list *timer,
* @key: lockdep class key of the fake lock used for tracking timer
* sync lock dependencies
*
- * init_timer_key() must be done to a timer prior calling *any* of the
+ * init_timer_key() must be done to a timer prior to calling *any* of the
* other timer functions.
*/
void init_timer_key(struct timer_list *timer,
@@ -1417,7 +1417,7 @@ static int __timer_delete(struct timer_list *timer, bool shutdown)
* If @shutdown is set then the lock has to be taken whether the
* timer is pending or not to protect against a concurrent rearm
* which might hit between the lockless pending check and the lock
- * aquisition. By taking the lock it is ensured that such a newly
+ * acquisition. By taking the lock it is ensured that such a newly
* enqueued timer is dequeued and cannot end up with
* timer->function == NULL in the expiry code.
*
@@ -2306,7 +2306,7 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
/*
* When timer base is not set idle, undo the effect of
- * tmigr_cpu_deactivate() to prevent inconsitent states - active
+ * tmigr_cpu_deactivate() to prevent inconsistent states - active
* timer base but inactive timer migration hierarchy.
*
* When timer base was already marked idle, nothing will be
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index c63a0afdcebe..ccba875d2234 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -751,6 +751,33 @@ bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
first_childevt = evt = data->evt;
+ /*
+ * Walking the hierarchy is required in any case when a
+ * remote expiry was done before. This ensures that already
+ * queued events in non-active groups are not lost (see section
+ * "Required event and timerqueue update after a remote
+ * expiry" in the documentation at the top).
+ *
+ * The two call sites which are executed without a prior remote
+ * expiry are not prevented from propagating changes through the
+ * hierarchy by this return:
+ * - When entering this path via tmigr_new_timer(), @evt->ignore
+ * is never set.
+ * - tmigr_inactive_up() takes care of the propagation by
+ * itself and ignores the return value. But an immediate
+ * return is possible if there is a parent, sparing group
+ * locking at this level, because the upper walking call to
+ * the parent will take care of removing this event from
+ * within the group and updating next_expiry accordingly.
+ *
+ * However, if there is no parent, i.e. the hierarchy has only a
+ * single level and @group is the top level group, make sure the
+ * first event information of the group is updated and handled
+ * properly, so skip this fast return path.
+ */
+ if (evt->ignore && !remote && group->parent)
+ return true;
+
raw_spin_lock(&group->lock);
childstate.state = 0;
@@ -762,8 +789,11 @@ bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
* queue when the expiry time changed only or when it could be ignored.
*/
if (timerqueue_node_queued(&evt->nextevt)) {
- if ((evt->nextevt.expires == nextexp) && !evt->ignore)
+ if ((evt->nextevt.expires == nextexp) && !evt->ignore) {
+ /* Make sure not to miss a new CPU event with the same expiry */
+ evt->cpu = first_childevt->cpu;
goto check_toplvl;
+ }
if (!timerqueue_del(&group->events, &evt->nextevt))
WRITE_ONCE(group->next_expiry, KTIME_MAX);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 61c541c36596..47345bf1d4a9 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -965,7 +965,7 @@ config FTRACE_RECORD_RECURSION
config FTRACE_RECORD_RECURSION_SIZE
int "Max number of recursed functions to record"
- default 128
+ default 128
depends on FTRACE_RECORD_RECURSION
help
This defines the limit of number of functions that can be
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0a5c4efc73c3..9dc605f08a23 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2728,7 +2728,7 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
.release = bpf_kprobe_multi_link_release,
- .dealloc = bpf_kprobe_multi_link_dealloc,
+ .dealloc_deferred = bpf_kprobe_multi_link_dealloc,
.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
};
@@ -3157,6 +3157,9 @@ static void bpf_uprobe_multi_link_release(struct bpf_link *link)
umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
+ if (umulti_link->task)
+ put_task_struct(umulti_link->task);
+ path_put(&umulti_link->path);
}
static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
@@ -3164,9 +3167,6 @@ static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
struct bpf_uprobe_multi_link *umulti_link;
umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
- if (umulti_link->task)
- put_task_struct(umulti_link->task);
- path_put(&umulti_link->path);
kvfree(umulti_link->uprobes);
kfree(umulti_link);
}
@@ -3242,7 +3242,7 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
.release = bpf_uprobe_multi_link_release,
- .dealloc = bpf_uprobe_multi_link_dealloc,
+ .dealloc_deferred = bpf_uprobe_multi_link_dealloc,
.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
};
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 25476ead681b..6511dc3a00da 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1393,7 +1393,6 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
- local_inc(&cpu_buffer->pages_touched);
/*
* Just make sure we have seen our old_write and synchronize
* with any interrupts that come in.
@@ -1430,8 +1429,9 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
*/
local_set(&next_page->page->commit, 0);
- /* Again, either we update tail_page or an interrupt does */
- (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
+ /* Either we update tail_page or an interrupt does */
+ if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
+ local_inc(&cpu_buffer->pages_touched);
}
}
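
try_cmpxchg() folds the comparison and the failure feedback into one call, which lets the accounting increment run only when this path actually advanced tail_page; the idiom as a minimal standalone sketch:

	int expected = old_val;

	if (try_cmpxchg(&var, &expected, new_val)) {
		/* this context performed the old_val -> new_val update */
	} else {
		/* lost the race; expected now holds var's current value */
	}

Only the winner bumps pages_touched, fixing the over-accounting from the unconditional local_inc() removed above.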
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7c364b87352e..52f75c36bbca 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1670,6 +1670,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
return 0;
}
+#ifdef CONFIG_PERF_EVENTS
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
@@ -1684,6 +1685,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
+#endif
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
@@ -2152,10 +2154,12 @@ static const struct file_operations ftrace_event_format_fops = {
.release = seq_release,
};
+#ifdef CONFIG_PERF_EVENTS
static const struct file_operations ftrace_event_id_fops = {
.read = event_id_read,
.llseek = default_llseek,
};
+#endif
static const struct file_operations ftrace_event_filter_fops = {
.open = tracing_open_file_tr,
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 217169de0920..dfe3ee6035ec 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -839,7 +839,7 @@ out:
void store_trace_entry_data(void *edata, struct trace_probe *tp, struct pt_regs *regs)
{
struct probe_entry_arg *earg = tp->entry_arg;
- unsigned long val;
+ unsigned long val = 0;
int i;
if (!earg)
diff --git a/kernel/vmcore_info.c b/kernel/vmcore_info.c
index f95516cd45bb..23c125c2e243 100644
--- a/kernel/vmcore_info.c
+++ b/kernel/vmcore_info.c
@@ -205,11 +205,10 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_head_mask);
#define PAGE_BUDDY_MAPCOUNT_VALUE (~PG_buddy)
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
-#ifdef CONFIG_HUGETLB_PAGE
- VMCOREINFO_NUMBER(PG_hugetlb);
+#define PAGE_HUGETLB_MAPCOUNT_VALUE (~PG_hugetlb)
+ VMCOREINFO_NUMBER(PAGE_HUGETLB_MAPCOUNT_VALUE);
#define PAGE_OFFLINE_MAPCOUNT_VALUE (~PG_offline)
VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
-#endif
#ifdef CONFIG_KALLSYMS
VMCOREINFO_SYMBOL(kallsyms_names);
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index c59d26068a64..97f8911ea339 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -61,9 +61,12 @@ static inline void * __init xbc_alloc_mem(size_t size)
return memblock_alloc(size, SMP_CACHE_BYTES);
}
-static inline void __init xbc_free_mem(void *addr, size_t size)
+static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
{
- memblock_free(addr, size);
+ if (early)
+ memblock_free(addr, size);
+ else if (addr)
+ memblock_free_late(__pa(addr), size);
}
#else /* !__KERNEL__ */
@@ -73,7 +76,7 @@ static inline void *xbc_alloc_mem(size_t size)
return malloc(size);
}
-static inline void xbc_free_mem(void *addr, size_t size)
+static inline void xbc_free_mem(void *addr, size_t size, bool early)
{
free(addr);
}
@@ -898,19 +901,20 @@ static int __init xbc_parse_tree(void)
}
/**
- * xbc_exit() - Clean up all parsed bootconfig
+ * _xbc_exit() - Clean up all parsed bootconfig
+ * @early: Set true if this is called before the buddy system is initialized.
*
* This clears all data structures of parsed bootconfig on memory.
* If you need to reuse xbc_init() with new boot config, you can
* use this.
*/
-void __init xbc_exit(void)
+void __init _xbc_exit(bool early)
{
- xbc_free_mem(xbc_data, xbc_data_size);
+ xbc_free_mem(xbc_data, xbc_data_size, early);
xbc_data = NULL;
xbc_data_size = 0;
xbc_node_num = 0;
- xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
+ xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX, early);
xbc_nodes = NULL;
brace_index = 0;
}
@@ -963,7 +967,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
if (!xbc_nodes) {
if (emsg)
*emsg = "Failed to allocate bootconfig nodes";
- xbc_exit();
+ _xbc_exit(true);
return -ENOMEM;
}
memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
@@ -977,7 +981,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
*epos = xbc_err_pos;
if (emsg)
*emsg = xbc_err_msg;
- xbc_exit();
+ _xbc_exit(true);
} else
ret = xbc_node_num;
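
Callers pick the variant based on allocator state: memblock_free() only works before the buddy allocator takes over, after which memblock_free_late() must hand the pages to the page allocator instead. Presumably the old xbc_exit() name survives as a thin wrapper; a sketch (header outside this diff):

	/* include/linux/bootconfig.h (assumed wrapper) */
	#define xbc_exit()	_xbc_exit(false)

	/* early failure paths, as in xbc_init() above: */
	_xbc_exit(true);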
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index bf70850035c7..404dba36bae3 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -594,13 +594,15 @@ static void test_ip_fast_csum(struct kunit *test)
static void test_csum_ipv6_magic(struct kunit *test)
{
-#if defined(CONFIG_NET)
const struct in6_addr *saddr;
const struct in6_addr *daddr;
unsigned int len;
unsigned char proto;
__wsum csum;
+ if (!IS_ENABLED(CONFIG_NET))
+ return;
+
const int daddr_offset = sizeof(struct in6_addr);
const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr);
const int proto_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
@@ -618,7 +620,6 @@ static void test_csum_ipv6_magic(struct kunit *test)
CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]),
csum_ipv6_magic(saddr, daddr, len, proto, csum));
}
-#endif /* !CONFIG_NET */
}
static struct kunit_case __refdata checksum_test_cases[] = {
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index af6cc19a2003..cd8f23455285 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -330,7 +330,7 @@ static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
stack = current_pool + pool_offset;
/* Pre-initialize handle once. */
- stack->handle.pool_index = pool_index + 1;
+ stack->handle.pool_index_plus_1 = pool_index + 1;
stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
stack->handle.extra = 0;
INIT_LIST_HEAD(&stack->hash_list);
@@ -441,7 +441,7 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
const int pools_num_cached = READ_ONCE(pools_num);
union handle_parts parts = { .handle = handle };
void *pool;
- u32 pool_index = parts.pool_index - 1;
+ u32 pool_index = parts.pool_index_plus_1 - 1;
size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
@@ -627,10 +627,10 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
- * contexts and I/O.
+ * contexts, I/O, and nolockdep.
*/
alloc_flags &= ~GFP_ZONEMASK;
- alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+ alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
alloc_flags |= __GFP_NOWARN;
page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
if (page)
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 276c12140ee2..c288df9372ed 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -134,7 +134,7 @@ static const test_ubsan_fp test_ubsan_array[] = {
};
/* Excluded because they Oops the module. */
-static const test_ubsan_fp skip_ubsan_array[] = {
+static __used const test_ubsan_fp skip_ubsan_array[] = {
test_ubsan_divrem_overflow,
};
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 5fc107f61934..a1c983d148f1 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -44,9 +44,10 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_shift_out_of_bounds:
return "UBSAN: shift out of bounds";
#endif
-#ifdef CONFIG_UBSAN_DIV_ZERO
+#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_SIGNED_WRAP)
/*
- * SanitizerKind::IntegerDivideByZero emits
+ * SanitizerKind::IntegerDivideByZero and
+ * SanitizerKind::SignedIntegerOverflow emit
* SanitizerHandler::DivremOverflow.
*/
case ubsan_divrem_overflow:
@@ -78,6 +79,19 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_type_mismatch:
return "UBSAN: type mismatch";
#endif
+#ifdef CONFIG_UBSAN_SIGNED_WRAP
+ /*
+ * SanitizerKind::SignedIntegerOverflow emits
+ * SanitizerHandler::AddOverflow, SanitizerHandler::SubOverflow,
+ * or SanitizerHandler::MulOverflow.
+ */
+ case ubsan_add_overflow:
+ return "UBSAN: integer addition overflow";
+ case ubsan_sub_overflow:
+ return "UBSAN: integer subtraction overflow";
+ case ubsan_mul_overflow:
+ return "UBSAN: integer multiplication overflow";
+#endif
default:
return "UBSAN: unrecognized failure code";
}
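
With CONFIG_UBSAN_SIGNED_WRAP enabled, the new cases translate the compiler-emitted overflow handlers into readable reports on the trap path; a trivial trigger for illustration:

	int a = INT_MAX;	/* from <linux/limits.h> */

	a += 1;	/* resolves through report_ubsan_failure() as
		 * "UBSAN: integer addition overflow" */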
diff --git a/mm/Makefile b/mm/Makefile
index e4b5b75aaec9..4abb40b911ec 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -29,8 +29,7 @@ KCOV_INSTRUMENT_mmzone.o := n
KCOV_INSTRUMENT_vmstat.o := n
KCOV_INSTRUMENT_failslab.o := n
-CFLAGS_init-mm.o += $(call cc-disable-warning, override-init)
-CFLAGS_init-mm.o += $(call cc-disable-warning, initializer-overrides)
+CFLAGS_init-mm.o += -Wno-override-init
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := highmem.o memory.o mincore.o \
diff --git a/mm/filemap.c b/mm/filemap.c
index 7437b2bd75c1..30de18c4fd28 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4197,7 +4197,23 @@ static void filemap_cachestat(struct address_space *mapping,
/* shmem file - in swap cache */
swp_entry_t swp = radix_to_swp_entry(folio);
+ /* swapin error results in poisoned entry */
+ if (non_swap_entry(swp))
+ goto resched;
+
+ /*
+ * Getting a swap entry from the shmem
+ * inode means we beat
+ * shmem_unuse(). rcu_read_lock()
+ * ensures swapoff waits for us before
+ * freeing the swapper space. However,
+ * we can race with swapping and
+ * invalidation, so there might not be
+ * a shadow in the swapcache (yet).
+ */
shadow = get_shadow_from_swap_cache(swp);
+ if (!shadow)
+ goto resched;
}
#endif
if (workingset_test_recent(shadow, true, &workingset))
diff --git a/mm/gup.c b/mm/gup.c
index df83182ec72d..1611e73b1121 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1206,6 +1206,22 @@ static long __get_user_pages(struct mm_struct *mm,
/* first iteration or cross vma bound */
if (!vma || start >= vma->vm_end) {
+ /*
+ * MADV_POPULATE_(READ|WRITE) wants to handle VMA
+ * lookups+error reporting differently.
+ */
+ if (gup_flags & FOLL_MADV_POPULATE) {
+ vma = vma_lookup(mm, start);
+ if (!vma) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (check_vma_flags(vma, gup_flags)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ goto retry;
+ }
vma = gup_vma_lookup(mm, start);
if (!vma && in_gate_area(mm, start)) {
ret = get_gate_page(mm, start & PAGE_MASK,
@@ -1653,20 +1669,22 @@ long populate_vma_page_range(struct vm_area_struct *vma,
if (vma->vm_flags & VM_LOCKONFAULT)
return nr_pages;
+ /* ... similarly, we've never faulted in PROT_NONE pages */
+ if (!vma_is_accessible(vma))
+ return -EFAULT;
+
gup_flags = FOLL_TOUCH;
/*
* We want to touch writable mappings with a write fault in order
* to break COW, except for shared mappings because these don't COW
* and we would not want to dirty them for nothing.
+ *
+ * Otherwise, do a read fault, and use FOLL_FORCE in case it's not
+ * readable (ie write-only or executable).
*/
if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
gup_flags |= FOLL_WRITE;
-
- /*
- * We want mlock to succeed for regions that have any permissions
- * other than PROT_NONE.
- */
- if (vma_is_accessible(vma))
+ else
gup_flags |= FOLL_FORCE;
if (locked)
@@ -1683,35 +1701,35 @@ long populate_vma_page_range(struct vm_area_struct *vma,
}
/*
- * faultin_vma_page_range() - populate (prefault) page tables inside the
- * given VMA range readable/writable
+ * faultin_page_range() - populate (prefault) page tables inside the
+ * given range readable/writable
*
* This takes care of mlocking the pages, too, if VM_LOCKED is set.
*
- * @vma: target vma
+ * @mm: the mm to populate page tables in
* @start: start address
* @end: end address
* @write: whether to prefault readable or writable
* @locked: whether the mmap_lock is still held
*
- * Returns either number of processed pages in the vma, or a negative error
- * code on error (see __get_user_pages()).
+ * Returns either the number of processed pages in the MM, or a negative error
+ * code on error (see __get_user_pages()). Note that this function reports
+ * errors related to VMAs, such as incompatible mappings, as expected by
+ * MADV_POPULATE_(READ|WRITE).
*
- * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
- * covered by the VMA. If it's released, *@locked will be set to 0.
+ * The range must be page-aligned.
+ *
+ * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
*/
-long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, bool write, int *locked)
+long faultin_page_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, bool write, int *locked)
{
- struct mm_struct *mm = vma->vm_mm;
unsigned long nr_pages = (end - start) / PAGE_SIZE;
int gup_flags;
long ret;
VM_BUG_ON(!PAGE_ALIGNED(start));
VM_BUG_ON(!PAGE_ALIGNED(end));
- VM_BUG_ON_VMA(start < vma->vm_start, vma);
- VM_BUG_ON_VMA(end > vma->vm_end, vma);
mmap_assert_locked(mm);
/*
@@ -1723,19 +1741,13 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
* a poisoned page.
* !FOLL_FORCE: Require proper access permissions.
*/
- gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
+ gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
+ FOLL_MADV_POPULATE;
if (write)
gup_flags |= FOLL_WRITE;
- /*
- * We want to report -EINVAL instead of -EFAULT for any permission
- * problems or incompatible mappings.
- */
- if (check_vma_flags(vma, gup_flags))
- return -EINVAL;
-
- ret = __get_user_pages(mm, start, nr_pages, gup_flags,
- NULL, locked);
+ ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
+ gup_flags);
lru_add_drain();
return ret;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9859aa4f7553..89f58c7603b2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2259,9 +2259,6 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
goto unlock_ptls;
}
- folio_move_anon_rmap(src_folio, dst_vma);
- WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
-
src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
/* Folio got pinned from under us. Put it back and fail the move. */
if (folio_maybe_dma_pinned(src_folio)) {
@@ -2270,6 +2267,9 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
goto unlock_ptls;
}
+ folio_move_anon_rmap(src_folio, dst_vma);
+ WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+
_dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
/* Follow mremap() behavior and treat the entry dirty after the move */
_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 23ef240ba48a..ce7be5c24442 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1624,7 +1624,7 @@ static inline void __clear_hugetlb_destructor(struct hstate *h,
{
lockdep_assert_held(&hugetlb_lock);
- folio_clear_hugetlb(folio);
+ __folio_clear_hugetlb(folio);
}
/*
@@ -1711,7 +1711,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
h->surplus_huge_pages_node[nid]++;
}
- folio_set_hugetlb(folio);
+ __folio_set_hugetlb(folio);
folio_change_private(folio, NULL);
/*
* We have to set hugetlb_vmemmap_optimized again as above
@@ -1781,7 +1781,7 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
* If vmemmap pages were allocated above, then we need to clear the
* hugetlb destructor under the hugetlb lock.
*/
- if (clear_dtor) {
+ if (folio_test_hugetlb(folio)) {
spin_lock_irq(&hugetlb_lock);
__clear_hugetlb_destructor(h, folio);
spin_unlock_irq(&hugetlb_lock);
@@ -2049,7 +2049,7 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
- folio_set_hugetlb(folio);
+ __folio_set_hugetlb(folio);
INIT_LIST_HEAD(&folio->lru);
hugetlb_set_folio_subpool(folio, NULL);
set_hugetlb_cgroup(folio, NULL);
@@ -2160,22 +2160,6 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
}
/*
- * PageHuge() only returns true for hugetlbfs pages, but not for normal or
- * transparent huge pages. See the PageTransHuge() documentation for more
- * details.
- */
-int PageHuge(const struct page *page)
-{
- const struct folio *folio;
-
- if (!PageCompound(page))
- return 0;
- folio = page_folio(page);
- return folio_test_hugetlb(folio);
-}
-EXPORT_SYMBOL_GPL(PageHuge);
-
-/*
* Find and lock address space (mapping) in write mode.
*
* Upon entry, the page is locked which means that page_mapping() is
@@ -3268,9 +3252,12 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
rsv_adjust = hugepage_subpool_put_pages(spool, 1);
hugetlb_acct_memory(h, -rsv_adjust);
- if (deferred_reserve)
+ if (deferred_reserve) {
+ spin_lock_irq(&hugetlb_lock);
hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
pages_per_huge_page(h), folio);
+ spin_unlock_irq(&hugetlb_lock);
+ }
}
if (!memcg_charge_ret)
@@ -6274,6 +6261,12 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
VM_UFFD_MISSING);
}
+ if (!(vma->vm_flags & VM_MAYSHARE)) {
+ ret = vmf_anon_prepare(vmf);
+ if (unlikely(ret))
+ goto out;
+ }
+
folio = alloc_hugetlb_folio(vma, haddr, 0);
if (IS_ERR(folio)) {
/*
@@ -6310,15 +6303,12 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
*/
restore_reserve_on_error(h, vma, haddr, folio);
folio_put(folio);
+ ret = VM_FAULT_SIGBUS;
goto out;
}
new_pagecache_folio = true;
} else {
folio_lock(folio);
-
- ret = vmf_anon_prepare(vmf);
- if (unlikely(ret))
- goto backout_unlocked;
anon_rmap = 1;
}
} else {
@@ -7044,9 +7034,13 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
if (!pte_same(pte, newpte))
set_huge_pte_at(mm, address, ptep, newpte, psize);
} else if (unlikely(is_pte_marker(pte))) {
- /* No other markers apply for now. */
- WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
- if (uffd_wp_resolve)
+ /*
+ * Do nothing on a poison marker; page is
+ * corrupted, permissions do not apply. Here
+ * pte_marker_uffd_wp()==true implies !poison
+ * because they're mutually exclusive.
+ */
+ if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
/* Safe to modify directly (non-present->none). */
huge_pte_clear(mm, address, ptep, psize);
} else if (!huge_pte_none(pte)) {
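The hugetlb_no_page() hunks above move vmf_anon_prepare() in front of alloc_hugetlb_folio(), so a failed anon_vma setup bails out before any folio exists, and the pagecache-insertion failure path now reports VM_FAULT_SIGBUS explicitly. The underlying discipline is the usual one of running cheap, failure-prone preparation before expensive resource acquisition, so the error paths need no unwinding; a minimal userspace sketch of that shape (all names invented):

    #include <stdlib.h>

    /* Invented stand-ins for the two fault-path steps. */
    static int prepare_metadata(void) { return 0; }      /* cheap, may fail */
    static void *alloc_big_buffer(void) { return malloc(1 << 20); } /* expensive */

    static int fault_path(void)
    {
        void *buf;

        /* Fail fast while there is still nothing to unwind. */
        if (prepare_metadata())
            return -1;

        buf = alloc_big_buffer();
        if (!buf)
            return -1;

        /* ... populate buf ... */
        free(buf);
        return 0;
    }

    int main(void)
    {
        return fault_path() ? EXIT_FAILURE : EXIT_SUCCESS;
    }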
diff --git a/mm/internal.h b/mm/internal.h
index 7e486f2c502c..07ad2675a88b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -686,9 +686,8 @@ struct anon_vma *folio_anon_vma(struct folio *folio);
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *locked);
-extern long faultin_vma_page_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- bool write, int *locked);
+extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
unsigned long bytes);
@@ -1127,10 +1126,13 @@ enum {
FOLL_FAST_ONLY = 1 << 20,
/* allow unlocking the mmap lock */
FOLL_UNLOCKABLE = 1 << 21,
+ /* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
+ FOLL_MADV_POPULATE = 1 << 22,
};
#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
- FOLL_FAST_ONLY | FOLL_UNLOCKABLE)
+ FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
+ FOLL_MADV_POPULATE)
/*
* Indicates for which pages that are write-protected in the page table,
diff --git a/mm/madvise.c b/mm/madvise.c
index 44a498c94158..1a073fcc4c0c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -908,27 +908,14 @@ static long madvise_populate(struct vm_area_struct *vma,
{
const bool write = behavior == MADV_POPULATE_WRITE;
struct mm_struct *mm = vma->vm_mm;
- unsigned long tmp_end;
int locked = 1;
long pages;
*prev = vma;
while (start < end) {
- /*
- * We might have temporarily dropped the lock. For example,
- * our VMA might have been split.
- */
- if (!vma || start >= vma->vm_end) {
- vma = vma_lookup(mm, start);
- if (!vma)
- return -ENOMEM;
- }
-
- tmp_end = min_t(unsigned long, end, vma->vm_end);
/* Populate (prefault) page tables readable/writable. */
- pages = faultin_vma_page_range(vma, start, tmp_end, write,
- &locked);
+ pages = faultin_page_range(mm, start, end, write, &locked);
if (!locked) {
mmap_read_lock(mm);
locked = 1;
@@ -949,7 +936,7 @@ static long madvise_populate(struct vm_area_struct *vma,
pr_warn_once("%s: unhandled return value: %ld\n",
__func__, pages);
fallthrough;
- case -ENOMEM:
+ case -ENOMEM: /* No VMA or out of memory. */
return -ENOMEM;
}
}
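With faultin_vma_page_range() replaced by faultin_page_range(), madvise_populate() no longer resolves VMAs at all: the helper takes the mm plus the whole range, performs the lookup and the MADV_POPULATE compatibility checks itself (gated by the new FOLL_MADV_POPULATE flag), and reports -ENOMEM both for OOM and for holes with no VMA. What remains is a plain "fault some pages, advance, classify errors" loop; a toy model in standalone C, with the helper stubbed and a 4 KiB page size assumed:

    #include <errno.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /*
     * Invented stub for faultin_page_range(): pretends the whole range
     * is covered and faults in at most 8 pages per call.
     */
    static long fault_in_range(unsigned long start, unsigned long end, int write)
    {
        unsigned long pages = (end - start) / PAGE_SIZE;

        (void)write;
        return pages > 8 ? 8 : (long)pages;
    }

    static long populate(unsigned long start, unsigned long end, int write)
    {
        while (start < end) {
            long pages = fault_in_range(start, end, write);

            if (pages < 0) {
                if (pages == -EINTR || pages == -EAGAIN)
                    continue;   /* transient, retry */
                return pages;   /* -ENOMEM: hole or out of memory */
            }
            start += (unsigned long)pages * PAGE_SIZE;
        }
        return 0;
    }

    int main(void)
    {
        printf("populate: %ld\n", populate(0, 64 * PAGE_SIZE, 1));
        return 0;
    }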
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9349948f1abf..9e62a00b46dd 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -154,11 +154,23 @@ static int __page_handle_poison(struct page *page)
{
int ret;
- zone_pcp_disable(page_zone(page));
+ /*
+ * zone_pcp_disable() can't be used here. It will
+ * hold pcp_batch_high_lock and dissolve_free_huge_page() might hold
+ * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
+ * optimization is enabled. This would break the current lock
+ * dependency chain and lead to deadlock.
+ * Disabling pcp before dissolving the page was a deterministic
+ * approach because we made sure that those pages cannot end up in any
+ * PCP list. Draining PCP lists expels those pages to the buddy system,
+ * but nothing guarantees that those pages do not get back to a PCP
+ * queue if we need to refill those.
+ */
ret = dissolve_free_huge_page(page);
- if (!ret)
+ if (!ret) {
+ drain_all_pages(page_zone(page));
ret = take_page_off_buddy(page);
- zone_pcp_enable(page_zone(page));
+ }
return ret;
}
diff --git a/mm/memory.c b/mm/memory.c
index f2bc6dd15eb8..d2155ced45f8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1536,7 +1536,9 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
arch_check_zapped_pte(vma, ptent);
tlb_remove_tlb_entry(tlb, pte, addr);
- VM_WARN_ON_ONCE(userfaultfd_wp(vma));
+ if (userfaultfd_pte_wp(vma, ptent))
+ zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
+ details, ptent);
ksm_might_unmap_zero_page(mm, ptent);
return 1;
}
@@ -5971,6 +5973,10 @@ int follow_phys(struct vm_area_struct *vma,
goto out;
pte = ptep_get(ptep);
+ /* Never return PFNs of anon folios in COW mappings. */
+ if (vm_normal_folio(vma, address, pte))
+ goto unlock;
+
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
diff --git a/mm/page_owner.c b/mm/page_owner.c
index e7139952ffd9..742f432e5bf0 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -54,6 +54,22 @@ static depot_stack_handle_t early_handle;
static void init_early_allocated_pages(void);
+static inline void set_current_in_page_owner(void)
+{
+ /*
+ * Avoid recursion: page_owner code may itself need to allocate
+ * memory (e.g. when saving a stack to the depot), so flag the
+ * task before doing so.
+ */
+ current->in_page_owner = 1;
+}
+
+static inline void unset_current_in_page_owner(void)
+{
+ current->in_page_owner = 0;
+}
+
static int __init early_page_owner_param(char *buf)
{
int ret = kstrtobool(buf, &page_owner_enabled);
@@ -102,7 +118,6 @@ static __init void init_page_owner(void)
register_dummy_stack();
register_failure_stack();
register_early_stack();
- static_branch_enable(&page_owner_inited);
init_early_allocated_pages();
/* Initialize dummy and failure stacks and link them to stack_list */
dummy_stack.stack_record = __stack_depot_get_stack_record(dummy_handle);
@@ -113,6 +128,7 @@ static __init void init_page_owner(void)
refcount_set(&failure_stack.stack_record->count, 1);
dummy_stack.next = &failure_stack;
stack_list = &dummy_stack;
+ static_branch_enable(&page_owner_inited);
}
struct page_ext_operations page_owner_ops = {
@@ -133,23 +149,16 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
depot_stack_handle_t handle;
unsigned int nr_entries;
- /*
- * Avoid recursion.
- *
- * Sometimes page metadata allocation tracking requires more
- * memory to be allocated:
- * - when new stack trace is saved to stack depot
- */
if (current->in_page_owner)
return dummy_handle;
- current->in_page_owner = 1;
+ set_current_in_page_owner();
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
handle = stack_depot_save(entries, nr_entries, flags);
if (!handle)
handle = failure_handle;
+ unset_current_in_page_owner();
- current->in_page_owner = 0;
return handle;
}
@@ -164,9 +173,13 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
gfp_mask |= __GFP_NOWARN;
+ set_current_in_page_owner();
stack = kmalloc(sizeof(*stack), gfp_mask);
- if (!stack)
+ if (!stack) {
+ unset_current_in_page_owner();
return;
+ }
+ unset_current_in_page_owner();
stack->stack_record = stack_record;
stack->next = NULL;
@@ -183,7 +196,8 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
spin_unlock_irqrestore(&stack_list_lock, flags);
}
-static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask)
+static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,
+ int nr_base_pages)
{
struct stack_record *stack_record = __stack_depot_get_stack_record(handle);
@@ -204,20 +218,74 @@ static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask)
/* Add the new stack_record to our list */
add_stack_record_to_list(stack_record, gfp_mask);
}
- refcount_inc(&stack_record->count);
+ refcount_add(nr_base_pages, &stack_record->count);
}
-static void dec_stack_record_count(depot_stack_handle_t handle)
+static void dec_stack_record_count(depot_stack_handle_t handle,
+ int nr_base_pages)
{
struct stack_record *stack_record = __stack_depot_get_stack_record(handle);
- if (stack_record)
- refcount_dec(&stack_record->count);
+ if (!stack_record)
+ return;
+
+ if (refcount_sub_and_test(nr_base_pages, &stack_record->count))
+ pr_warn("%s: refcount went to 0 for %u handle\n", __func__,
+ handle);
}
-void __reset_page_owner(struct page *page, unsigned short order)
+static inline void __update_page_owner_handle(struct page_ext *page_ext,
+ depot_stack_handle_t handle,
+ unsigned short order,
+ gfp_t gfp_mask,
+ short last_migrate_reason, u64 ts_nsec,
+ pid_t pid, pid_t tgid, char *comm)
{
int i;
+ struct page_owner *page_owner;
+
+ for (i = 0; i < (1 << order); i++) {
+ page_owner = get_page_owner(page_ext);
+ page_owner->handle = handle;
+ page_owner->order = order;
+ page_owner->gfp_mask = gfp_mask;
+ page_owner->last_migrate_reason = last_migrate_reason;
+ page_owner->pid = pid;
+ page_owner->tgid = tgid;
+ page_owner->ts_nsec = ts_nsec;
+ strscpy(page_owner->comm, comm,
+ sizeof(page_owner->comm));
+ __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+ __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
+ page_ext = page_ext_next(page_ext);
+ }
+}
+
+static inline void __update_page_owner_free_handle(struct page_ext *page_ext,
+ depot_stack_handle_t handle,
+ unsigned short order,
+ pid_t pid, pid_t tgid,
+ u64 free_ts_nsec)
+{
+ int i;
+ struct page_owner *page_owner;
+
+ for (i = 0; i < (1 << order); i++) {
+ page_owner = get_page_owner(page_ext);
+ /* Only __reset_page_owner() wants to clear the bit */
+ if (handle) {
+ __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
+ page_owner->free_handle = handle;
+ }
+ page_owner->free_ts_nsec = free_ts_nsec;
+ page_owner->free_pid = current->pid;
+ page_owner->free_tgid = current->tgid;
+ page_ext = page_ext_next(page_ext);
+ }
+}
+
+void __reset_page_owner(struct page *page, unsigned short order)
+{
struct page_ext *page_ext;
depot_stack_handle_t handle;
depot_stack_handle_t alloc_handle;
@@ -232,16 +300,10 @@ void __reset_page_owner(struct page *page, unsigned short order)
alloc_handle = page_owner->handle;
handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
- for (i = 0; i < (1 << order); i++) {
- __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
- page_owner->free_handle = handle;
- page_owner->free_ts_nsec = free_ts_nsec;
- page_owner->free_pid = current->pid;
- page_owner->free_tgid = current->tgid;
- page_ext = page_ext_next(page_ext);
- page_owner = get_page_owner(page_ext);
- }
+ __update_page_owner_free_handle(page_ext, handle, order, current->pid,
+ current->tgid, free_ts_nsec);
page_ext_put(page_ext);
+
if (alloc_handle != early_handle)
/*
* early_handle is being set as a handle for all those
@@ -250,39 +312,14 @@ void __reset_page_owner(struct page *page, unsigned short order)
* the machinery is not ready yet, we cannot decrement
* their refcount either.
*/
- dec_stack_record_count(alloc_handle);
-}
-
-static inline void __set_page_owner_handle(struct page_ext *page_ext,
- depot_stack_handle_t handle,
- unsigned short order, gfp_t gfp_mask)
-{
- struct page_owner *page_owner;
- int i;
- u64 ts_nsec = local_clock();
-
- for (i = 0; i < (1 << order); i++) {
- page_owner = get_page_owner(page_ext);
- page_owner->handle = handle;
- page_owner->order = order;
- page_owner->gfp_mask = gfp_mask;
- page_owner->last_migrate_reason = -1;
- page_owner->pid = current->pid;
- page_owner->tgid = current->tgid;
- page_owner->ts_nsec = ts_nsec;
- strscpy(page_owner->comm, current->comm,
- sizeof(page_owner->comm));
- __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
- __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
-
- page_ext = page_ext_next(page_ext);
- }
+ dec_stack_record_count(alloc_handle, 1 << order);
}
noinline void __set_page_owner(struct page *page, unsigned short order,
gfp_t gfp_mask)
{
struct page_ext *page_ext;
+ u64 ts_nsec = local_clock();
depot_stack_handle_t handle;
handle = save_stack(gfp_mask);
@@ -290,9 +327,11 @@ noinline void __set_page_owner(struct page *page, unsigned short order,
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
- __set_page_owner_handle(page_ext, handle, order, gfp_mask);
+ __update_page_owner_handle(page_ext, handle, order, gfp_mask, -1,
+ current->pid, current->tgid, ts_nsec,
+ current->comm);
page_ext_put(page_ext);
- inc_stack_record_count(handle, gfp_mask);
+ inc_stack_record_count(handle, gfp_mask, 1 << order);
}
void __set_page_owner_migrate_reason(struct page *page, int reason)
@@ -327,9 +366,12 @@ void __split_page_owner(struct page *page, int old_order, int new_order)
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
+ int i;
struct page_ext *old_ext;
struct page_ext *new_ext;
- struct page_owner *old_page_owner, *new_page_owner;
+ struct page_owner *old_page_owner;
+ struct page_owner *new_page_owner;
+ depot_stack_handle_t migrate_handle;
old_ext = page_ext_get(&old->page);
if (unlikely(!old_ext))
@@ -343,30 +385,32 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
old_page_owner = get_page_owner(old_ext);
new_page_owner = get_page_owner(new_ext);
- new_page_owner->order = old_page_owner->order;
- new_page_owner->gfp_mask = old_page_owner->gfp_mask;
- new_page_owner->last_migrate_reason =
- old_page_owner->last_migrate_reason;
- new_page_owner->handle = old_page_owner->handle;
- new_page_owner->pid = old_page_owner->pid;
- new_page_owner->tgid = old_page_owner->tgid;
- new_page_owner->free_pid = old_page_owner->free_pid;
- new_page_owner->free_tgid = old_page_owner->free_tgid;
- new_page_owner->ts_nsec = old_page_owner->ts_nsec;
- new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
- strcpy(new_page_owner->comm, old_page_owner->comm);
-
+ migrate_handle = new_page_owner->handle;
+ __update_page_owner_handle(new_ext, old_page_owner->handle,
+ old_page_owner->order, old_page_owner->gfp_mask,
+ old_page_owner->last_migrate_reason,
+ old_page_owner->ts_nsec, old_page_owner->pid,
+ old_page_owner->tgid, old_page_owner->comm);
/*
- * We don't clear the bit on the old folio as it's going to be freed
- * after migration. Until then, the info can be useful in case of
- * a bug, and the overall stats will be off a bit only temporarily.
- * Also, migrate_misplaced_transhuge_page() can still fail the
- * migration and then we want the old folio to retain the info. But
- * in that case we also don't need to explicitly clear the info from
- * the new page, which will be freed.
+ * Do not proactively clear PAGE_EXT_OWNER{_ALLOCATED} bits as the folio
+ * will be freed after migration. Keep them until then as they may be
+ * useful.
*/
- __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
- __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
+ __update_page_owner_free_handle(new_ext, 0, old_page_owner->order,
+ old_page_owner->free_pid,
+ old_page_owner->free_tgid,
+ old_page_owner->free_ts_nsec);
+ /*
+ * We linked the original stack to the new folio, we need to do the same
+ * for the new one and the old folio otherwise there will be an imbalance
+ * when subtracting those pages from the stack.
+ */
+ for (i = 0; i < (1 << new_page_owner->order); i++) {
+ old_page_owner->handle = migrate_handle;
+ old_ext = page_ext_next(old_ext);
+ old_page_owner = get_page_owner(old_ext);
+ }
+
page_ext_put(new_ext);
page_ext_put(old_ext);
}
@@ -774,8 +818,9 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
goto ext_put_continue;
/* Found early allocated page */
- __set_page_owner_handle(page_ext, early_handle,
- 0, 0);
+ __update_page_owner_handle(page_ext, early_handle, 0, 0,
+ -1, local_clock(), current->pid,
+ current->tgid, current->comm);
count++;
ext_put_continue:
page_ext_put(page_ext);
@@ -827,13 +872,11 @@ static void *stack_start(struct seq_file *m, loff_t *ppos)
* value of stack_list.
*/
stack = smp_load_acquire(&stack_list);
+ m->private = stack;
} else {
stack = m->private;
- stack = stack->next;
}
- m->private = stack;
-
return stack;
}
@@ -848,11 +891,11 @@ static void *stack_next(struct seq_file *m, void *v, loff_t *ppos)
return stack;
}
-static unsigned long page_owner_stack_threshold;
+static unsigned long page_owner_pages_threshold;
static int stack_print(struct seq_file *m, void *v)
{
- int i, stack_count;
+ int i, nr_base_pages;
struct stack *stack = v;
unsigned long *entries;
unsigned long nr_entries;
@@ -863,14 +906,14 @@ static int stack_print(struct seq_file *m, void *v)
nr_entries = stack_record->size;
entries = stack_record->entries;
- stack_count = refcount_read(&stack_record->count) - 1;
+ nr_base_pages = refcount_read(&stack_record->count) - 1;
- if (stack_count < 1 || stack_count < page_owner_stack_threshold)
+ if (nr_base_pages < 1 || nr_base_pages < page_owner_pages_threshold)
return 0;
for (i = 0; i < nr_entries; i++)
seq_printf(m, " %pS\n", (void *)entries[i]);
- seq_printf(m, "stack_count: %d\n\n", stack_count);
+ seq_printf(m, "nr_base_pages: %d\n\n", nr_base_pages);
return 0;
}
@@ -900,13 +943,13 @@ static const struct file_operations page_owner_stack_operations = {
static int page_owner_threshold_get(void *data, u64 *val)
{
- *val = READ_ONCE(page_owner_stack_threshold);
+ *val = READ_ONCE(page_owner_pages_threshold);
return 0;
}
static int page_owner_threshold_set(void *data, u64 val)
{
- WRITE_ONCE(page_owner_stack_threshold, val);
+ WRITE_ONCE(page_owner_pages_threshold, val);
return 0;
}
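The accounting switch above means each stack record now counts outstanding base pages rather than allocation events: __set_page_owner() adds 1 << order, __reset_page_owner() subtracts the same, and the debugfs threshold therefore filters on page counts (hence the page_owner_pages_threshold rename). A toy model of the bookkeeping, assuming the depot's baseline reference of 1 that stack_print() subtracts:

    #include <stdio.h>

    /* Toy model: one counter per unique allocation stack. */
    static unsigned long stack_refs = 1;    /* depot keeps a baseline ref */

    static void on_alloc(unsigned short order)
    {
        stack_refs += 1UL << order;     /* count base pages, not calls */
    }

    static void on_free(unsigned short order)
    {
        stack_refs -= 1UL << order;
    }

    int main(void)
    {
        on_alloc(3);    /* an order-3 allocation pins 8 base pages */
        on_alloc(0);
        on_free(3);
        /* stack_print() style: subtract the baseline ref -> 1 page left */
        printf("nr_base_pages: %lu\n", stack_refs - 1);
        return 0;
    }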
diff --git a/mm/shmem.c b/mm/shmem.c
index 0aad0d9a621b..94ab99b6b574 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -748,12 +748,6 @@ static long shmem_unused_huge_count(struct super_block *sb,
#define shmem_huge SHMEM_HUGE_DENY
-bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags)
-{
- return false;
-}
-
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
struct shrink_control *sc, unsigned long nr_to_split)
{
diff --git a/mm/shmem_quota.c b/mm/shmem_quota.c
index 062d1c1097ae..ce514e700d2f 100644
--- a/mm/shmem_quota.c
+++ b/mm/shmem_quota.c
@@ -116,7 +116,7 @@ static int shmem_free_file_info(struct super_block *sb, int type)
static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
{
struct mem_dqinfo *info = sb_dqinfo(sb, qid->type);
- struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ struct rb_node *node;
qid_t id = from_kqid(&init_user_ns, *qid);
struct quota_info *dqopt = sb_dqopt(sb);
struct quota_id *entry = NULL;
@@ -126,6 +126,7 @@ static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
return -ESRCH;
down_read(&dqopt->dqio_sem);
+ node = ((struct rb_root *)info->dqi_priv)->rb_node;
while (node) {
entry = rb_entry(node, struct quota_id, node);
@@ -165,7 +166,7 @@ out_unlock:
static int shmem_acquire_dquot(struct dquot *dquot)
{
struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
- struct rb_node **n = &((struct rb_root *)info->dqi_priv)->rb_node;
+ struct rb_node **n;
struct shmem_sb_info *sbinfo = dquot->dq_sb->s_fs_info;
struct rb_node *parent = NULL, *new_node = NULL;
struct quota_id *new_entry, *entry;
@@ -176,6 +177,8 @@ static int shmem_acquire_dquot(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
down_write(&dqopt->dqio_sem);
+ n = &((struct rb_root *)info->dqi_priv)->rb_node;
+
while (*n) {
parent = *n;
entry = rb_entry(parent, struct quota_id, node);
@@ -264,7 +267,7 @@ static bool shmem_is_empty_dquot(struct dquot *dquot)
static int shmem_release_dquot(struct dquot *dquot)
{
struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
- struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ struct rb_node *node;
qid_t id = from_kqid(&init_user_ns, dquot->dq_id);
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
struct quota_id *entry = NULL;
@@ -275,6 +278,7 @@ static int shmem_release_dquot(struct dquot *dquot)
goto out_dqlock;
down_write(&dqopt->dqio_sem);
+ node = ((struct rb_root *)info->dqi_priv)->rb_node;
while (node) {
entry = rb_entry(node, struct quota_id, node);
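All three shmem_quota hunks fix the same pattern: the rb-tree root was loaded from info->dqi_priv before dqio_sem was taken, so a concurrent insert or delete could rotate a new root into place and the walk would start from a stale node. Loading the root only inside the critical section closes that window. The same discipline in a self-contained pthread sketch (types invented):

    #include <pthread.h>
    #include <stdio.h>

    struct node { int key; struct node *left, *right; };

    static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct node *root;   /* writers may replace this pointer */

    static struct node *lookup(int key)
    {
        struct node *n;

        pthread_rwlock_rdlock(&tree_lock);
        n = root;   /* load the root only while holding the lock */
        while (n && n->key != key)
            n = key < n->key ? n->left : n->right;
        pthread_rwlock_unlock(&tree_lock);
        return n;
    }

    int main(void)
    {
        struct node leaf = { 42, NULL, NULL };

        root = &leaf;
        printf("found: %d\n", lookup(42) != NULL);
        return 0;
    }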
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 712160cd41ec..3c3539c573e7 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1444,7 +1444,8 @@ static int uffd_move_lock(struct mm_struct *mm,
*/
down_read(&(*dst_vmap)->vm_lock->lock);
if (*dst_vmap != *src_vmap)
- down_read(&(*src_vmap)->vm_lock->lock);
+ down_read_nested(&(*src_vmap)->vm_lock->lock,
+ SINGLE_DEPTH_NESTING);
}
mmap_read_unlock(mm);
return err;
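down_read_nested(..., SINGLE_DEPTH_NESTING) does not change the locking itself; it only tells lockdep that taking a second vm_lock of the same lock class is intentional and bounded to one level, silencing a false-positive deadlock report. Userspace has no lockdep, so the closest analogue for safely taking two same-class locks is a fixed acquisition order, e.g. by address; a minimal sketch with invented types (the pointer ordering is the classic idiom, not what the kernel code above does):

    #include <pthread.h>

    struct region { pthread_mutex_t lock; };

    /* Always lock the lower-addressed region first to rule out deadlock. */
    static void lock_pair(struct region *a, struct region *b)
    {
        if (a == b) {
            pthread_mutex_lock(&a->lock);
            return;
        }
        if (a > b) {
            struct region *t = a; a = b; b = t;
        }
        pthread_mutex_lock(&a->lock);
        pthread_mutex_lock(&b->lock);
    }

    static void unlock_pair(struct region *a, struct region *b)
    {
        pthread_mutex_unlock(&a->lock);
        if (a != b)
            pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
        struct region r1 = { PTHREAD_MUTEX_INITIALIZER };
        struct region r2 = { PTHREAD_MUTEX_INITIALIZER };

        lock_pair(&r1, &r2);
        unlock_pair(&r1, &r2);
        return 0;
    }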
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 22aa63f4ef63..68fa001648cc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -989,6 +989,27 @@ unsigned long vmalloc_nr_pages(void)
return atomic_long_read(&nr_vmalloc_pages);
}
+static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
+{
+ struct rb_node *n = root->rb_node;
+
+ addr = (unsigned long)kasan_reset_tag((void *)addr);
+
+ while (n) {
+ struct vmap_area *va;
+
+ va = rb_entry(n, struct vmap_area, rb_node);
+ if (addr < va->va_start)
+ n = n->rb_left;
+ else if (addr >= va->va_end)
+ n = n->rb_right;
+ else
+ return va;
+ }
+
+ return NULL;
+}
+
/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
@@ -1025,47 +1046,39 @@ __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
- struct vmap_node *vn, *va_node = NULL;
- struct vmap_area *va_lowest;
+ unsigned long va_start_lowest;
+ struct vmap_node *vn;
int i;
- for (i = 0; i < nr_vmap_nodes; i++) {
+repeat:
+ for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
vn = &vmap_nodes[i];
spin_lock(&vn->busy.lock);
- va_lowest = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
- if (va_lowest) {
- if (!va_node || va_lowest->va_start < (*va)->va_start) {
- if (va_node)
- spin_unlock(&va_node->busy.lock);
-
- *va = va_lowest;
- va_node = vn;
- continue;
- }
- }
+ *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
+
+ if (*va)
+ if (!va_start_lowest || (*va)->va_start < va_start_lowest)
+ va_start_lowest = (*va)->va_start;
spin_unlock(&vn->busy.lock);
}
- return va_node;
-}
-
-static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
-{
- struct rb_node *n = root->rb_node;
+ /*
+ * Check that the found VA still exists; it may have been removed
+ * concurrently. In that rare case, repeat the search so we can
+ * proceed to the next one.
+ */
+ if (va_start_lowest) {
+ vn = addr_to_node(va_start_lowest);
- addr = (unsigned long)kasan_reset_tag((void *)addr);
+ spin_lock(&vn->busy.lock);
+ *va = __find_vmap_area(va_start_lowest, &vn->busy.root);
- while (n) {
- struct vmap_area *va;
+ if (*va)
+ return vn;
- va = rb_entry(n, struct vmap_area, rb_node);
- if (addr < va->va_start)
- n = n->rb_left;
- else if (addr >= va->va_end)
- n = n->rb_right;
- else
- return va;
+ spin_unlock(&vn->busy.lock);
+ goto repeat;
}
return NULL;
@@ -2343,6 +2356,9 @@ struct vmap_area *find_vmap_area(unsigned long addr)
struct vmap_area *va;
int i, j;
+ if (unlikely(!vmap_initialized))
+ return NULL;
+
/*
* An addr_to_node_id(addr) converts an address to a node index
* where a VA is located. If VA spans several zones and passed
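The rewritten find_vmap_area_exceed_addr_lock() does an optimistic two-phase search: scan every node under its own busy lock and remember only the lowest candidate va_start, then re-look that address up under the owning node's lock; if the area vanished in between, the whole scan repeats. That avoids the old dance of holding two node locks at once. A compact userspace model of the scan-then-validate retry loop (one "area" per node, all names invented; unlike the kernel code, this model drops the lock before returning):

    #include <pthread.h>
    #include <stdio.h>

    #define NODES 4

    static struct {
        pthread_mutex_t lock;
        unsigned long start;    /* 0 means: no area on this node */
    } nodes[NODES];

    static unsigned long find_lowest(void)
    {
        unsigned long best;
        int i, best_i;

    retry:
        best = 0;
        best_i = -1;
        for (i = 0; i < NODES; i++) {
            pthread_mutex_lock(&nodes[i].lock);
            if (nodes[i].start && (!best || nodes[i].start < best)) {
                best = nodes[i].start;
                best_i = i;
            }
            pthread_mutex_unlock(&nodes[i].lock);
        }
        if (best_i < 0)
            return 0;

        /* Validate under the owner's lock; the area may be gone by now. */
        pthread_mutex_lock(&nodes[best_i].lock);
        if (nodes[best_i].start == best) {
            pthread_mutex_unlock(&nodes[best_i].lock);
            return best;
        }
        pthread_mutex_unlock(&nodes[best_i].lock);
        goto retry;     /* rare: removed concurrently, rescan */
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NODES; i++) {
            pthread_mutex_init(&nodes[i].lock, NULL);
            nodes[i].start = (i + 1) * 100UL;
        }
        printf("lowest: %lu\n", find_lowest());     /* 100 */
        return 0;
    }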
diff --git a/mm/zswap.c b/mm/zswap.c
index 9dec853647c8..6f8850c44b61 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1080,7 +1080,17 @@ static void zswap_decompress(struct zswap_entry *entry, struct page *page)
mutex_lock(&acomp_ctx->mutex);
src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
- if (acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) {
+ /*
+ * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
+ * to do crypto_acomp_decompress() which might sleep. In such cases, we must
+ * resort to copying the buffer to a temporary one.
+ * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
+ * such as a kmap address of high memory or even a vmap address.
+ * However, sg_init_one is only equipped to handle linearly mapped low memory.
+ * In such cases, we also must copy the buffer to a temporary and lowmem one.
+ */
+ if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
+ !virt_addr_valid(src)) {
memcpy(acomp_ctx->buffer, src, entry->length);
src = acomp_ctx->buffer;
zpool_unmap_handle(zpool, entry->handle);
@@ -1094,7 +1104,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct page *page)
BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
mutex_unlock(&acomp_ctx->mutex);
- if (!acomp_ctx->is_sleepable || zpool_can_sleep_mapped(zpool))
+ if (src != acomp_ctx->buffer)
zpool_unmap_handle(zpool, entry->handle);
}
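The widened condition above bounces the compressed data into acomp_ctx->buffer whenever the zpool mapping either cannot be held across a sleeping decompression or is not a linear lowmem address that sg_init_one() can describe, and the matching unmap test collapses to "did we bounce?" (src != acomp_ctx->buffer). The decision is easy to model in isolation; a hedged userspace sketch with both predicates stubbed out:

    #include <stdio.h>
    #include <string.h>

    #define CHUNK 4096

    static char bounce[CHUNK];  /* preallocated, always safe to use */

    /* Invented predicates standing in for the two kernel-side conditions. */
    static int mapping_is_atomic(void) { return 0; }
    static int addr_is_linear_lowmem(const void *p) { (void)p; return 1; }

    static const void *make_safe(const void *src, size_t len)
    {
        if (mapping_is_atomic() || !addr_is_linear_lowmem(src)) {
            memcpy(bounce, src, len);   /* bounce to the safe buffer */
            return bounce;
        }
        return src;     /* the mapping can be used directly */
    }

    int main(void)
    {
        char data[CHUNK] = "compressed bytes";
        const void *src = make_safe(data, sizeof(data));

        /* Mirrors the new unmap rule: unmap only if we did not bounce. */
        printf("%s\n", src == bounce ? "bounced" : "direct");
        return 0;
    }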
@@ -1313,15 +1323,30 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
return 0;
-#ifdef CONFIG_MEMCG_KMEM
- mem_cgroup_flush_stats(memcg);
- nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
- nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
-#else
- /* use pool stats instead of memcg stats */
- nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
- nr_stored = atomic_read(&zswap_nr_stored);
-#endif
+ /*
+ * The shrinker resumes swap writeback, which will enter block
+ * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
+ * rules (may_enter_fs()), which apply on a per-folio basis.
+ */
+ if (!gfp_has_io_fs(sc->gfp_mask))
+ return 0;
+
+ /*
+ * For memcg, use the cgroup-wide ZSWAP stats since we don't
+ * have them per-node and thus per-lruvec. Careful if memcg is
+ * runtime-disabled: we can get sc->memcg == NULL, which is ok
+ * for the lruvec, but not for memcg_page_state().
+ *
+ * Without memcg, use the zswap pool-wide metrics.
+ */
+ if (!mem_cgroup_disabled()) {
+ mem_cgroup_flush_stats(memcg);
+ nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
+ nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+ } else {
+ nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
+ nr_stored = atomic_read(&zswap_nr_stored);
+ }
if (!nr_stored)
return 0;
@@ -1618,6 +1643,7 @@ bool zswap_load(struct folio *folio)
swp_entry_t swp = folio->swap;
pgoff_t offset = swp_offset(swp);
struct page *page = &folio->page;
+ bool swapcache = folio_test_swapcache(folio);
struct zswap_tree *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
u8 *dst;
@@ -1630,7 +1656,20 @@ bool zswap_load(struct folio *folio)
spin_unlock(&tree->lock);
return false;
}
- zswap_rb_erase(&tree->rbroot, entry);
+ /*
+ * When reading into the swapcache, invalidate our entry. The
+ * swapcache can be the authoritative owner of the page and
+ * its mappings, and the pressure that results from having two
+ * in-memory copies outweighs any benefits of caching the
+ * compression work.
+ *
+ * (Most swapins go through the swapcache. The notable
+ * exception is the singleton fault on SWP_SYNCHRONOUS_IO
+ * devices, which reads into a private page and may free it if
+ * the fault fails. We remain the primary owner of the entry.)
+ */
+ if (swapcache)
+ zswap_rb_erase(&tree->rbroot, entry);
spin_unlock(&tree->lock);
if (entry->length)
@@ -1645,9 +1684,10 @@ bool zswap_load(struct folio *folio)
if (entry->objcg)
count_objcg_event(entry->objcg, ZSWPIN);
- zswap_entry_free(entry);
-
- folio_mark_dirty(folio);
+ if (swapcache) {
+ zswap_entry_free(entry);
+ folio_mark_dirty(folio);
+ }
return true;
}
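The zswap_load() change makes the exclusive load conditional: when the folio is in the swapcache, the entry is erased and freed and the folio marked dirty so the data survives via writeback, while on the SWP_SYNCHRONOUS_IO singleton path the fault may still fail, so zswap must remain the owner of the only copy. A toy model of the conditional invalidation, stripped of all locking:

    #include <stdbool.h>
    #include <stdio.h>

    struct entry { bool valid; };

    /*
     * Drop our compressed copy only when the reader (the "swapcache"
     * here) is guaranteed to keep the data alive from now on.
     */
    static bool load(struct entry *e, bool reader_owns_data)
    {
        if (!e->valid)
            return false;
        /* ... decompress into the destination page ... */
        if (reader_owns_data)
            e->valid = false;   /* exclusive load */
        return true;
    }

    int main(void)
    {
        struct entry e = { true };

        load(&e, false);    /* singleton fault: entry survives */
        printf("after non-owning load: %d\n", e.valid);
        load(&e, true);     /* swapcache read: entry dropped */
        printf("after owning load:     %d\n", e.valid);
        return 0;
    }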
diff --git a/net/9p/client.c b/net/9p/client.c
index e265a0ca6bdd..f7e90b4769bb 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1583,7 +1583,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
received = rsize;
}
- p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received);
if (non_zc) {
int n = copy_to_iter(dataptr, received, to);
@@ -1609,9 +1609,6 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
int total = 0;
*err = 0;
- p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
- fid->fid, offset, iov_iter_count(from));
-
while (iov_iter_count(from)) {
int count = iov_iter_count(from);
int rsize = fid->iounit;
@@ -1623,6 +1620,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
if (count < rsize)
rsize = count;
+ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n",
+ fid->fid, offset, rsize, count);
+
/* Don't bother zerocopy for small IO (< 1024) */
if (clnt->trans_mod->zc_request && rsize > 1024) {
req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0,
@@ -1650,7 +1650,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
written = rsize;
}
- p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
p9_req_put(clnt, req);
iov_iter_revert(from, count - written - iov_iter_count(from));
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 1a3948b8c493..196060dc6138 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -95,7 +95,6 @@ struct p9_poll_wait {
* @unsent_req_list: accounting for requests that haven't been sent
* @rreq: read request
* @wreq: write request
- * @req: current request being processed (if any)
* @tmp_buf: temporary buffer to read in header
* @rc: temporary fcall for reading current frame
* @wpos: write position for current frame
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 558e158c98d0..9169efb2f43a 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -103,7 +103,7 @@ again:
s->ax25_dev = NULL;
if (sk->sk_socket) {
netdev_put(ax25_dev->dev,
- &ax25_dev->dev_tracker);
+ &s->dev_tracker);
ax25_dev_put(ax25_dev);
}
ax25_cb_del(s);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index c5462486dbca..282ec581c072 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -105,7 +105,7 @@ void ax25_dev_device_down(struct net_device *dev)
spin_lock_bh(&ax25_dev_lock);
#ifdef CONFIG_AX25_DAMA_SLAVE
- ax25_ds_del_timer(ax25_dev);
+ timer_shutdown_sync(&ax25_dev->dama.slave_timer);
#endif
/*
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b95c36765d04..2243cec18ecc 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -3948,7 +3948,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
spin_lock_bh(&bat_priv->tt.commit_lock);
- while (true) {
+ while (timeout) {
table_size = batadv_tt_local_table_transmit_size(bat_priv);
if (packet_size_max >= table_size)
break;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3ad74f76983b..05346250f719 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1263,7 +1263,7 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
- u16 conn_timeout, u8 role)
+ u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
{
struct hci_conn *conn;
struct smp_irk *irk;
@@ -1326,6 +1326,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->dst_type = dst_type;
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
+ conn->le_adv_phy = phy;
+ conn->le_adv_sec_phy = sec_phy;
err = hci_connect_le_sync(hdev, conn);
if (err) {
@@ -2273,7 +2275,7 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
le = hci_connect_le(hdev, dst, dst_type, false,
BT_SECURITY_LOW,
HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_SLAVE);
+ HCI_ROLE_SLAVE, 0, 0);
else
le = hci_connect_le_scan(hdev, dst, dst_type,
BT_SECURITY_LOW,
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 1690ae57a09d..a7028d38c1f5 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2874,7 +2874,7 @@ static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
cancel_delayed_work_sync(&hdev->ncmd_timer);
atomic_set(&hdev->cmd_cnt, 1);
- hci_cmd_sync_cancel_sync(hdev, -err);
+ hci_cmd_sync_cancel_sync(hdev, err);
}
/* Suspend HCI device */
@@ -2894,7 +2894,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
return 0;
/* Cancel potentially blocking sync operation before suspend */
- hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
+ hci_cancel_cmd_sync(hdev, EHOSTDOWN);
hci_req_sync_lock(hdev);
ret = hci_suspend_sync(hdev);
@@ -4210,7 +4210,7 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
err = hci_send_frame(hdev, skb);
if (err < 0) {
- hci_cmd_sync_cancel_sync(hdev, err);
+ hci_cmd_sync_cancel_sync(hdev, -err);
return;
}
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 233453807b50..ce3ff2fa72e5 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -218,10 +218,12 @@ static int conn_info_min_age_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val == 0 || val > hdev->conn_info_max_age)
+ hci_dev_lock(hdev);
+ if (val == 0 || val > hdev->conn_info_max_age) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->conn_info_min_age = val;
hci_dev_unlock(hdev);
@@ -246,10 +248,12 @@ static int conn_info_max_age_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val == 0 || val < hdev->conn_info_min_age)
+ hci_dev_lock(hdev);
+ if (val == 0 || val < hdev->conn_info_min_age) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->conn_info_max_age = val;
hci_dev_unlock(hdev);
@@ -567,10 +571,12 @@ static int sniff_min_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
+ hci_dev_lock(hdev);
+ if (val == 0 || val % 2 || val > hdev->sniff_max_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->sniff_min_interval = val;
hci_dev_unlock(hdev);
@@ -595,10 +601,12 @@ static int sniff_max_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
+ hci_dev_lock(hdev);
+ if (val == 0 || val % 2 || val < hdev->sniff_min_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->sniff_max_interval = val;
hci_dev_unlock(hdev);
@@ -850,10 +858,12 @@ static int conn_min_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
+ hci_dev_lock(hdev);
+ if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->le_conn_min_interval = val;
hci_dev_unlock(hdev);
@@ -878,10 +888,12 @@ static int conn_max_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
+ hci_dev_lock(hdev);
+ if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->le_conn_max_interval = val;
hci_dev_unlock(hdev);
@@ -990,10 +1002,12 @@ static int adv_min_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
+ hci_dev_lock(hdev);
+ if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->le_adv_min_interval = val;
hci_dev_unlock(hdev);
@@ -1018,10 +1032,12 @@ static int adv_max_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
+ hci_dev_lock(hdev);
+ if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }
- hci_dev_lock(hdev);
hdev->le_adv_max_interval = val;
hci_dev_unlock(hdev);
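Every debugfs setter above had the same race: val was compared against the paired limit (min against max and vice versa) before hci_dev_lock() was taken, so two racing writers could leave min > max. Moving the comparison inside the critical section makes the check and the update atomic with respect to the sibling's updates. The pattern in a self-contained pthread sketch:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int min_val = 10, max_val = 100;

    static int set_min(unsigned int val)
    {
        pthread_mutex_lock(&lock);
        /* Validate against max_val under the same lock that protects it. */
        if (val == 0 || val > max_val) {
            pthread_mutex_unlock(&lock);
            return -EINVAL;
        }
        min_val = val;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", set_min(50), set_min(200));   /* "0 -22" */
        printf("min=%u\n", min_val);                    /* "min=50" */
        return 0;
    }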
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4ae224824012..4a27e4a17a67 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3208,6 +3208,31 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
if (test_bit(HCI_ENCRYPT, &hdev->flags))
set_bit(HCI_CONN_ENCRYPT, &conn->flags);
+ /* "Link key request" completed ahead of "connect request" completes */
+ if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
+ ev->link_type == ACL_LINK) {
+ struct link_key *key;
+ struct hci_cp_read_enc_key_size cp;
+
+ key = hci_find_link_key(hdev, &ev->bdaddr);
+ if (key) {
+ set_bit(HCI_CONN_ENCRYPT, &conn->flags);
+
+ if (!read_key_size_capable(hdev)) {
+ conn->enc_key_size = HCI_LINK_KEY_SIZE;
+ } else {
+ cp.handle = cpu_to_le16(conn->handle);
+ if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
+ sizeof(cp), &cp)) {
+ bt_dev_err(hdev, "sending read key size failed");
+ conn->enc_key_size = HCI_LINK_KEY_SIZE;
+ }
+ }
+
+ hci_encrypt_cfm(conn, ev->status);
+ }
+ }
+
/* Get remote features */
if (conn->type == ACL_LINK) {
struct hci_cp_read_remote_features cp;
@@ -3641,8 +3666,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
* controller really supports it. If it doesn't, assume
* the default size (16).
*/
- if (!(hdev->commands[20] & 0x10) ||
- test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) {
+ if (!read_key_size_capable(hdev)) {
conn->enc_key_size = HCI_LINK_KEY_SIZE;
goto notify;
}
@@ -6013,7 +6037,7 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
bdaddr_t *addr,
u8 addr_type, bool addr_resolved,
- u8 adv_type)
+ u8 adv_type, u8 phy, u8 sec_phy)
{
struct hci_conn *conn;
struct hci_conn_params *params;
@@ -6068,7 +6092,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
- HCI_ROLE_MASTER);
+ HCI_ROLE_MASTER, phy, sec_phy);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
* by higher layer that tried to connect, if no then
@@ -6103,8 +6127,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
u8 bdaddr_type, bdaddr_t *direct_addr,
- u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
- bool ext_adv, bool ctl_time, u64 instant)
+ u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
+ u8 *data, u8 len, bool ext_adv, bool ctl_time,
+ u64 instant)
{
struct discovery_state *d = &hdev->discovery;
struct smp_irk *irk;
@@ -6192,7 +6217,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
* for advertising reports) and is already verified to be RPA above.
*/
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
- type);
+ type, phy, sec_phy);
if (!ext_adv && conn && type == LE_ADV_IND &&
len <= max_adv_len(hdev)) {
/* Store report for later inclusion by
@@ -6338,7 +6363,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
if (info->length <= max_adv_len(hdev)) {
rssi = info->data[info->length];
process_adv_report(hdev, info->type, &info->bdaddr,
- info->bdaddr_type, NULL, 0, rssi,
+ info->bdaddr_type, NULL, 0,
+ HCI_ADV_PHY_1M, 0, rssi,
info->data, info->length, false,
false, instant);
} else {
@@ -6423,6 +6449,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
if (legacy_evt_type != LE_ADV_INVALID) {
process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
info->bdaddr_type, NULL, 0,
+ info->primary_phy,
+ info->secondary_phy,
info->rssi, info->data, info->length,
!(evt_type & LE_EXT_ADV_LEGACY_PDU),
false, instant);
@@ -6705,8 +6733,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
process_adv_report(hdev, info->type, &info->bdaddr,
info->bdaddr_type, &info->direct_addr,
- info->direct_addr_type, info->rssi, NULL, 0,
- false, false, instant);
+ info->direct_addr_type, HCI_ADV_PHY_1M, 0,
+ info->rssi, NULL, 0, false, false, instant);
}
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 00e02138003e..efea25eb56ce 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -105,8 +105,10 @@ void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
- if (skb)
+ if (skb) {
+ kfree_skb(hdev->req_skb);
hdev->req_skb = skb_get(skb);
+ }
wake_up_interruptible(&hdev->req_wait_q);
}
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 4ee1b976678b..703b84bd48d5 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1946,10 +1946,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
switch (optname) {
case HCI_DATA_DIR:
- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
+ if (err)
break;
- }
if (opt)
hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
@@ -1958,10 +1957,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
break;
case HCI_TIME_STAMP:
- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
+ if (err)
break;
- }
if (opt)
hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
@@ -1979,11 +1977,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
uf.event_mask[1] = *((u32 *) f->event_mask + 1);
}
- len = min_t(unsigned int, len, sizeof(uf));
- if (copy_from_sockptr(&uf, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&uf, sizeof(uf), optval, len);
+ if (err)
break;
- }
if (!capable(CAP_NET_RAW)) {
uf.type_mask &= hci_sec_filter.type_mask;
@@ -2042,10 +2038,9 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
goto done;
}
- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
+ if (err)
break;
- }
hci_pi(sk)->mtu = opt;
break;
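All of the setsockopt conversions in this series share one idea: instead of copying sizeof(opt) bytes while ignoring the user-supplied optlen (which allowed short user buffers to be over-read), a single helper validates the length first and returns an error, so every call site collapses to "err = copy; if (err) break;". The helper itself is not shown in this diff; its contract, inferred from the call sites, as a hedged plain-C sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Sketch of the checked-copy contract: refuse to read more than the
     * caller supplied, then copy exactly dst_size bytes. The kernel
     * helper would additionally fault-check the user pointer.
     */
    static int copy_checked(void *dst, size_t dst_size,
                            const void *src, size_t src_size)
    {
        if (dst_size > src_size)
            return -EINVAL;     /* user buffer too short */
        memcpy(dst, src, dst_size);
        return 0;
    }

    int main(void)
    {
        unsigned int opt = 0;
        unsigned char short_buf[2] = { 1, 2 };
        unsigned int full = 7;
        int err;

        /* Rejected: two bytes cannot fill a four-byte option. */
        printf("%d\n", copy_checked(&opt, sizeof(opt),
                                    short_buf, sizeof(short_buf)));
        err = copy_checked(&opt, sizeof(opt), &full, sizeof(full));
        printf("%d %u\n", err, opt);    /* "0 7" */
        return 0;
    }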
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index f6b662369322..4c707eb64e6f 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -617,7 +617,10 @@ void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
bt_dev_dbg(hdev, "err 0x%2.2x", err);
if (hdev->req_status == HCI_REQ_PEND) {
- hdev->req_result = err;
+ /* req_result is __u32 so error must be positive to be properly
+ * propagated.
+ */
+ hdev->req_result = err < 0 ? -err : err;
hdev->req_status = HCI_REQ_CANCELED;
wake_up_interruptible(&hdev->req_wait_q);
@@ -2811,8 +2814,8 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
cp->scanning_phys |= LE_SCAN_PHY_CODED;
hci_le_scan_phy_params(phy, type,
- interval,
- window);
+ interval * 3,
+ window * 3);
num_phy++;
phy++;
}
@@ -2832,7 +2835,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
if (scan_coded(hdev)) {
cp->scanning_phys |= LE_SCAN_PHY_CODED;
- hci_le_scan_phy_params(phy, type, interval, window);
+ hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
num_phy++;
phy++;
}
@@ -3416,7 +3419,10 @@ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
return;
- bacpy(&hdev->public_addr, &ba);
+ if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
+ baswap(&hdev->public_addr, &ba);
+ else
+ bacpy(&hdev->public_addr, &ba);
}
struct hci_init_stage {
@@ -6340,7 +6346,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
plen = sizeof(*cp);
- if (scan_1m(hdev)) {
+ if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
+ conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
cp->phys |= LE_SCAN_PHY_1M;
set_ext_conn_params(conn, p);
@@ -6348,7 +6355,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
plen += sizeof(*p);
}
- if (scan_2m(hdev)) {
+ if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
+ conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
cp->phys |= LE_SCAN_PHY_2M;
set_ext_conn_params(conn, p);
@@ -6356,7 +6364,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
plen += sizeof(*p);
}
- if (scan_coded(hdev)) {
+ if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
+ conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
cp->phys |= LE_SCAN_PHY_CODED;
set_ext_conn_params(conn, p);
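hdev->req_result is a __u32, so stashing a negative errno there stores a huge wrapped value; the consumers negate it on the way out, hence the "must be positive" rule the new comment states and the matching sign flips in hci_core.c. A two-line illustration of the wraparound, assuming ETIMEDOUT (110) as the errno:

    #include <stdio.h>

    int main(void)
    {
        unsigned int req_result = (unsigned int)-110;   /* negative errno */

        /* Wraps to 4294967186 instead of anything errno-like. */
        printf("stored negative: %u\n", req_result);

        req_result = 110;   /* store the positive errno instead ... */
        printf("consumer sees:   -%u\n", req_result);   /* ... negate on use */
        return 0;
    }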
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index c8793e57f4b5..ef0cc80b4c0c 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -1451,8 +1451,8 @@ static bool check_ucast_qos(struct bt_iso_qos *qos)
static bool check_bcast_qos(struct bt_iso_qos *qos)
{
- if (qos->bcast.sync_factor == 0x00)
- return false;
+ if (!qos->bcast.sync_factor)
+ qos->bcast.sync_factor = 0x01;
if (qos->bcast.packing > 0x01)
return false;
@@ -1475,6 +1475,9 @@ static bool check_bcast_qos(struct bt_iso_qos *qos)
if (qos->bcast.skip > 0x01f3)
return false;
+ if (!qos->bcast.sync_timeout)
+ qos->bcast.sync_timeout = BT_ISO_SYNC_TIMEOUT;
+
if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000)
return false;
@@ -1484,6 +1487,9 @@ static bool check_bcast_qos(struct bt_iso_qos *qos)
if (qos->bcast.mse > 0x1f)
return false;
+ if (!qos->bcast.timeout)
+ qos->bcast.timeout = BT_ISO_SYNC_TIMEOUT;
+
if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000)
return false;
@@ -1494,7 +1500,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- int len, err = 0;
+ int err = 0;
struct bt_iso_qos qos = default_qos;
u32 opt;
@@ -1509,10 +1515,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -1521,10 +1526,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_PKT_STATUS:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
@@ -1539,17 +1543,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- len = min_t(unsigned int, sizeof(qos), optlen);
-
- if (copy_from_sockptr(&qos, optval, len)) {
- err = -EFAULT;
- break;
- }
-
- if (len == sizeof(qos.ucast) && !check_ucast_qos(&qos)) {
- err = -EINVAL;
+ err = bt_copy_from_sockptr(&qos, sizeof(qos), optval, optlen);
+ if (err)
break;
- }
iso_pi(sk)->qos = qos;
iso_pi(sk)->qos_user_set = true;
@@ -1564,18 +1560,16 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
}
if (optlen > sizeof(iso_pi(sk)->base)) {
- err = -EOVERFLOW;
+ err = -EINVAL;
break;
}
- len = min_t(unsigned int, sizeof(iso_pi(sk)->base), optlen);
-
- if (copy_from_sockptr(iso_pi(sk)->base, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(iso_pi(sk)->base, optlen, optval,
+ optlen);
+ if (err)
break;
- }
- iso_pi(sk)->base_len = len;
+ iso_pi(sk)->base_len = optlen;
break;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 467b242d8be0..84fc70862d78 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4054,8 +4054,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
return -EPROTO;
hci_dev_lock(hdev);
- if (hci_dev_test_flag(hdev, HCI_MGMT) &&
- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_device_connected(hdev, hcon, NULL, 0);
hci_dev_unlock(hdev);
@@ -7019,7 +7018,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
hcon = hci_connect_le(hdev, dst, dst_type, false,
chan->sec_level, timeout,
- HCI_ROLE_SLAVE);
+ HCI_ROLE_SLAVE, 0, 0);
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
chan->sec_level, timeout,
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 4287aa6cc988..5cc83f906c12 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -439,7 +439,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
- int len, err = 0;
+ int err = 0;
+ size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@@ -486,7 +487,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
BT_DBG("mode 0x%2.2x", chan->mode);
- len = min_t(unsigned int, len, sizeof(opts));
+ len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
err = -EFAULT;
@@ -536,7 +537,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = chan->conn->hcon->handle;
memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
- len = min_t(unsigned int, len, sizeof(cinfo));
+ len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
@@ -727,7 +728,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
- int len, err = 0;
+ int err = 0;
u32 opt;
BT_DBG("sk %p", sk);
@@ -754,11 +755,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
opts.max_tx = chan->max_tx;
opts.txwin_size = chan->tx_win;
- len = min_t(unsigned int, sizeof(opts), optlen);
- if (copy_from_sockptr(&opts, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opts, sizeof(opts), optval, optlen);
+ if (err)
break;
- }
if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
err = -EINVAL;
@@ -801,10 +800,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
break;
case L2CAP_LM:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt & L2CAP_LM_FIPS) {
err = -EINVAL;
@@ -885,7 +883,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
struct bt_security sec;
struct bt_power pwr;
struct l2cap_conn *conn;
- int len, err = 0;
+ int err = 0;
u32 opt;
u16 mtu;
u8 mode;
@@ -911,11 +909,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
sec.level = BT_SECURITY_LOW;
- len = min_t(unsigned int, sizeof(sec), optlen);
- if (copy_from_sockptr(&sec, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
+ if (err)
break;
- }
if (sec.level < BT_SECURITY_LOW ||
sec.level > BT_SECURITY_FIPS) {
@@ -960,10 +956,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt) {
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -975,10 +970,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_FLUSHABLE:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt > BT_FLUSHABLE_ON) {
err = -EINVAL;
@@ -1010,11 +1004,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
- len = min_t(unsigned int, sizeof(pwr), optlen);
- if (copy_from_sockptr(&pwr, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
+ if (err)
break;
- }
if (pwr.force_active)
set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
@@ -1023,10 +1015,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_CHANNEL_POLICY:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
err = -EOPNOTSUPP;
break;
@@ -1055,10 +1046,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&mtu, optval, sizeof(u16))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
+ if (err)
break;
- }
if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
sk->sk_state == BT_CONNECTED)
@@ -1086,10 +1076,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&mode, optval, sizeof(u8))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&mode, sizeof(mode), optval, optlen);
+ if (err)
break;
- }
BT_DBG("mode %u", mode);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 32ed6e9245a3..965f621ef865 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2623,7 +2623,11 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}
- err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
+ /* MGMT_OP_ADD_UUID doesn't require the adapter to be UP/running, so use
+ * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+ */
+ err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
+ mgmt_class_complete);
if (err < 0) {
mgmt_pending_free(cmd);
goto failed;
@@ -2717,8 +2721,11 @@ update_class:
goto unlock;
}
- err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
- mgmt_class_complete);
+ /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be UP/running, so use
+ * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+ */
+ err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
+ mgmt_class_complete);
if (err < 0)
mgmt_pending_free(cmd);
@@ -2784,8 +2791,11 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
- mgmt_class_complete);
+ /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be UP/running, so use
+ * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+ */
+ err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
+ mgmt_class_complete);
if (err < 0)
mgmt_pending_free(cmd);
@@ -5475,8 +5485,8 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
- mgmt_remove_adv_monitor_complete);
+ err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
+ mgmt_remove_adv_monitor_complete);
if (err) {
mgmt_pending_remove(cmd);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index b54e8a530f55..29aa07e9db9d 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -629,7 +629,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname,
switch (optname) {
case RFCOMM_LM:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+ if (bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen)) {
err = -EFAULT;
break;
}
@@ -664,7 +664,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
struct bt_security sec;
int err = 0;
- size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@@ -686,11 +685,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
sec.level = BT_SECURITY_LOW;
- len = min_t(unsigned int, sizeof(sec), optlen);
- if (copy_from_sockptr(&sec, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
+ if (err)
break;
- }
if (sec.level > BT_SECURITY_HIGH) {
err = -EINVAL;
@@ -706,10 +703,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 43daf965a01e..5d03c5440b06 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -824,7 +824,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- int len, err = 0;
+ int err = 0;
struct bt_voice voice;
u32 opt;
struct bt_codecs *codecs;
@@ -843,10 +843,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -863,11 +862,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
voice.setting = sco_pi(sk)->setting;
- len = min_t(unsigned int, sizeof(voice), optlen);
- if (copy_from_sockptr(&voice, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&voice, sizeof(voice), optval,
+ optlen);
+ if (err)
break;
- }
/* Explicitly check for these values */
if (voice.setting != BT_VOICE_TRANSPARENT &&
@@ -890,10 +888,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_PKT_STATUS:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
@@ -934,9 +931,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(buffer, optval, optlen)) {
+ err = bt_copy_from_sockptr(buffer, optlen, optval, optlen);
+ if (err) {
hci_dev_put(hdev);
- err = -EFAULT;
break;
}
@@ -967,7 +964,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
- int len, err = 0;
+ int err = 0;
+ size_t len;
BT_DBG("sk %p", sk);
@@ -989,7 +987,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
BT_DBG("mtu %u", opts.mtu);
- len = min_t(unsigned int, len, sizeof(opts));
+ len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
@@ -1007,7 +1005,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
- len = min_t(unsigned int, len, sizeof(cinfo));
+ len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
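The rfcomm and sco conversions above replace open-coded copy_from_sockptr() calls, which silently accepted short user buffers, with one helper that validates optlen up front. A sketch of the helper, assuming it sits in include/net/bluetooth/bluetooth.h as the bt_ prefix suggests:

        static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
                                               sockptr_t src, size_t src_size)
        {
                /* Reject user buffers smaller than the kernel object */
                if (dst_size > src_size)
                        return -EINVAL;

                /* copy_from_sockptr() returns 0 or -EFAULT */
                return copy_from_sockptr(dst, src, dst_size);
        }

This also explains why the explicit err = -EFAULT assignments disappear from the call sites: the helper already returns a ready-to-use error code.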
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f21097e73482..ceaa5a89b947 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,7 +30,7 @@ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
return netif_receive_skb(skb);
}
-static int br_pass_frame_up(struct sk_buff *skb)
+static int br_pass_frame_up(struct sk_buff *skb, bool promisc)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
@@ -65,6 +65,8 @@ static int br_pass_frame_up(struct sk_buff *skb)
br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
BR_MCAST_DIR_TX);
+ BR_INPUT_SKB_CB(skb)->promisc = promisc;
+
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
dev_net(indev), NULL, skb, indev, NULL,
br_netif_receive_skb);
@@ -82,6 +84,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
struct net_bridge_mcast *brmctx;
struct net_bridge_vlan *vlan;
struct net_bridge *br;
+ bool promisc;
u16 vid = 0;
u8 state;
@@ -137,7 +140,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if (p->flags & BR_LEARNING)
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
- local_rcv = !!(br->dev->flags & IFF_PROMISC);
+ promisc = !!(br->dev->flags & IFF_PROMISC);
+ local_rcv = promisc;
+
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
/* by definition the broadcast is also a multicast address */
if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
@@ -200,7 +205,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
unsigned long now = jiffies;
if (test_bit(BR_FDB_LOCAL, &dst->flags))
- return br_pass_frame_up(skb);
+ return br_pass_frame_up(skb, false);
if (now != dst->used)
dst->used = now;
@@ -213,7 +218,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
}
if (local_rcv)
- return br_pass_frame_up(skb);
+ return br_pass_frame_up(skb, promisc);
out:
return 0;
@@ -386,6 +391,8 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
goto forward;
}
+ BR_INPUT_SKB_CB(skb)->promisc = false;
+
/* The else clause should be hit when nf_hook():
* - returns < 0 (drop/error)
* - returns = 0 (stolen/nf_queue)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 35e10c5a766d..22e35623c148 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -600,11 +600,17 @@ static unsigned int br_nf_local_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
+ bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
struct nf_conntrack *nfct = skb_nfct(skb);
const struct nf_ct_hook *ct_hook;
struct nf_conn *ct;
int ret;
+ if (promisc) {
+ nf_reset_ct(skb);
+ return NF_ACCEPT;
+ }
+
if (!nfct || skb->pkt_type == PACKET_HOST)
return NF_ACCEPT;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 2cf4fc756263..f17dbac7d828 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -667,7 +667,7 @@ void br_ifinfo_notify(int event, const struct net_bridge *br,
{
u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
- return br_info_notify(event, br, port, filter);
+ br_info_notify(event, br, port, filter);
}
/*
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 86ea5e6689b5..d4bedc87b1d8 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -589,6 +589,7 @@ struct br_input_skb_cb {
#endif
u8 proxyarp_replied:1;
u8 src_port_isolated:1;
+ u8 promisc:1;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
u8 vlan_filtered:1;
#endif
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 99d82676f780..cbd0e3586c3f 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1111,6 +1111,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
struct ebt_table_info *newinfo;
struct ebt_replace tmp;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1423,6 +1425,8 @@ static int update_counters(struct net *net, sockptr_t arg, unsigned int len)
{
struct ebt_replace hlp;
+ if (len < sizeof(hlp))
+ return -EINVAL;
if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
return -EFAULT;
@@ -2352,6 +2356,8 @@ static int compat_update_counters(struct net *net, sockptr_t arg,
{
struct compat_ebt_replace hlp;
+ if (len < sizeof(hlp))
+ return -EINVAL;
if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
return -EFAULT;
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 6f877e31709b..c3c51b9a6826 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -294,18 +294,24 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- enum ip_conntrack_info ctinfo;
+ bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
+ struct nf_conntrack *nfct = skb_nfct(skb);
struct nf_conn *ct;
- if (skb->pkt_type == PACKET_HOST)
+ if (promisc) {
+ nf_reset_ct(skb);
+ return NF_ACCEPT;
+ }
+
+ if (!nfct || skb->pkt_type == PACKET_HOST)
return NF_ACCEPT;
/* nf_conntrack_confirm() cannot handle concurrent clones,
* this happens for broad/multicast frames with e.g. macvlan on top
* of the bridge device.
*/
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
+ ct = container_of(nfct, struct nf_conn, ct_general);
+ if (nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
return NF_ACCEPT;
/* let inet prerouting call conntrack again */
diff --git a/net/core/dev.c b/net/core/dev.c
index 9a67003e49db..331848eca7d3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -429,7 +429,7 @@ EXPORT_PER_CPU_SYMBOL(softnet_data);
* PP consumers must pay attention to run APIs in the appropriate context
* (e.g. NAPI context).
*/
-static DEFINE_PER_CPU_ALIGNED(struct page_pool *, system_page_pool);
+static DEFINE_PER_CPU(struct page_pool *, system_page_pool);
#ifdef CONFIG_LOCKDEP
/*
@@ -3775,6 +3775,10 @@ no_lock_out:
return rc;
}
+ if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
+ return NET_XMIT_DROP;
+ }
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
@@ -3814,7 +3818,9 @@ no_lock_out:
qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
+ WRITE_ONCE(q->owner, smp_processor_id());
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
+ WRITE_ONCE(q->owner, -1);
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
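The q->owner logic above is a same-CPU re-entry detector for the qdisc enqueue path. The enqueueing CPU publishes its id in q->owner for the duration of dev_qdisc_enqueue(); if the same CPU reaches the same qdisc again while that call is still on its stack (e.g. a TC action that reclassifies the packet back out through the same device), the guard fires and the packet is dropped instead of deadlocking. The pattern, compressed (q->owner is presumably initialized to -1 elsewhere in the series):

        if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
                /* this CPU re-entered the qdisc: reclassification loop */
                kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
                return NET_XMIT_DROP;
        }
        ...
        WRITE_ONCE(q->owner, smp_processor_id()); /* take ownership */
        rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
        WRITE_ONCE(q->owner, -1);                 /* release it     */

Plain READ_ONCE/WRITE_ONCE suffice because only equality with the current CPU id matters; observing another CPU's stale id is harmless.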
diff --git a/net/core/gro.c b/net/core/gro.c
index ee30d4f0c038..83f35d99a682 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -192,8 +192,9 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
}
merge:
- /* sk owenrship - if any - completely transferred to the aggregated packet */
+ /* sk ownership - if any - completely transferred to the aggregated packet */
skb->destructor = NULL;
+ skb->sk = NULL;
delta_truesize = skb->truesize;
if (offset > headlen) {
unsigned int eat = offset - headlen;
diff --git a/net/core/sock.c b/net/core/sock.c
index 43bf3818c19e..0963689a5950 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -482,7 +482,7 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
- if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
atomic_inc(&sk->sk_drops);
trace_sock_rcvqueue_full(sk, skb);
return -ENOMEM;
@@ -552,7 +552,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
skb->dev = NULL;
- if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+ if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
atomic_inc(&sk->sk_drops);
goto discard_and_relse;
}
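The two sock.c hunks above annotate lockless reads of sk->sk_rcvbuf, which another thread can change at any time via SO_RCVBUF. A hedged sketch of the pairing; the writer side shown here is what sock.c's setsockopt path is expected to do:

        /* Writer publishes with WRITE_ONCE (e.g. sock_set_rcvbuf()): */
        WRITE_ONCE(sk->sk_rcvbuf, val);

        /* Readers on the packet path sample the value once, tear-free: */
        if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf))
                return -ENOMEM; /* receive queue full */

There is no functional change; the annotations document the intentional data race for KCSAN and keep the compiler from re-reading or tearing the value mid-check.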
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 27d733c0f65e..8598466a3805 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -411,6 +411,9 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
struct sock *sk;
int err = 0;
+ if (irqs_disabled())
+ return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
+
spin_lock_bh(&stab->lock);
sk = *psk;
if (!sk_test || sk_test == sk)
@@ -933,6 +936,9 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
struct bpf_shtab_elem *elem;
int ret = -ENOENT;
+ if (irqs_disabled())
+ return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
+
hash = sock_hash_bucket_hash(key, key_size);
bucket = sock_hash_select_bucket(htab, hash);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 2edc8b796a4e..049c3adeb850 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -164,17 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
eth = (struct ethhdr *)skb->data;
skb_pull_inline(skb, ETH_HLEN);
- if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
- dev->dev_addr))) {
- if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
- if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
- skb->pkt_type = PACKET_BROADCAST;
- else
- skb->pkt_type = PACKET_MULTICAST;
- } else {
- skb->pkt_type = PACKET_OTHERHOST;
- }
- }
+ eth_skb_pkt_type(skb, dev);
/*
* Some variants of DSA tagging don't have an ethertype field
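The eth_type_trans() hunk above is a pure refactor: the removed destination-address classification moves into a shared helper so other receive paths can reuse it. Presumably the helper carries the identical logic into a header, roughly:

        static inline void eth_skb_pkt_type(struct sk_buff *skb,
                                            const struct net_device *dev)
        {
                const struct ethhdr *eth = eth_hdr(skb);

                if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
                        if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
                                if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
                                        skb->pkt_type = PACKET_BROADCAST;
                                else
                                        skb->pkt_type = PACKET_MULTICAST;
                        } else {
                                skb->pkt_type = PACKET_OTHERHOST;
                        }
                }
        }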
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c98b5b71ad7c..e9d45133d641 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -132,30 +132,29 @@ static int hsr_dev_open(struct net_device *dev)
{
struct hsr_priv *hsr;
struct hsr_port *port;
- char designation;
+ const char *designation = NULL;
hsr = netdev_priv(dev);
- designation = '\0';
hsr_for_each_port(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
case HSR_PT_SLAVE_A:
- designation = 'A';
+ designation = "Slave A";
break;
case HSR_PT_SLAVE_B:
- designation = 'B';
+ designation = "Slave B";
break;
default:
- designation = '?';
+ designation = "Unknown";
}
if (!is_slave_up(port->dev))
- netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
+ netdev_warn(dev, "%s (%s) is not up; please bring it up to get a fully working HSR network\n",
designation, port->dev->name);
}
- if (designation == '\0')
+ if (!designation)
netdev_warn(dev, "No slave devices configured\n");
return 0;
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index e5742f2a2d52..1b6457f357bd 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -220,7 +220,8 @@ void hsr_del_port(struct hsr_port *port)
netdev_update_features(master->dev);
dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
netdev_rx_handler_unregister(port->dev);
- dev_set_promiscuity(port->dev, -1);
+ if (!port->hsr->fwd_offloaded)
+ dev_set_promiscuity(port->dev, -1);
netdev_upper_dev_unlink(port->dev, master->dev);
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 48741352a88a..c484b1c0fc00 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1050,6 +1050,11 @@ next:
e++;
}
}
+
+ /* Don't let NLM_DONE coalesce into a message, even if it could.
+ * Some user space expects NLM_DONE in a separate recv().
+ */
+ err = skb->len;
out:
cb->args[1] = e;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e63a3bf99617..437e782b9663 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -92,6 +92,7 @@
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>
+#include <net/addrconf.h>
/*
* Build xmit assembly blocks
@@ -1032,6 +1033,8 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
struct icmp_ext_hdr *ext_hdr, _ext_hdr;
struct icmp_ext_echo_iio *iio, _iio;
struct net *net = dev_net(skb->dev);
+ struct inet6_dev *in6_dev;
+ struct in_device *in_dev;
struct net_device *dev;
char buff[IFNAMSIZ];
u16 ident_len;
@@ -1115,10 +1118,15 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
/* Fill bits in reply message */
if (dev->flags & IFF_UP)
status |= ICMP_EXT_ECHOREPLY_ACTIVE;
- if (__in_dev_get_rcu(dev) && __in_dev_get_rcu(dev)->ifa_list)
+
+ in_dev = __in_dev_get_rcu(dev);
+ if (in_dev && rcu_access_pointer(in_dev->ifa_list))
status |= ICMP_EXT_ECHOREPLY_IPV4;
- if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))
+
+ in6_dev = __in6_dev_get(dev);
+ if (in6_dev && !list_empty(&in6_dev->addr_list))
status |= ICMP_EXT_ECHOREPLY_IPV6;
+
dev_put(dev);
icmphdr->un.echo.sequence |= htons(status);
return true;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7d8090f109ef..3b38610958ee 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -203,8 +203,15 @@ static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
kuid_t sk_uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
- if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
- return false;
+ if (ipv6_only_sock(sk2)) {
+ if (sk->sk_family == AF_INET)
+ return false;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ return false;
+#endif
+ }
return inet_bind_conflict(sk, sk2, sk_uid, relax,
reuseport_cb_ok, reuseport_ok);
@@ -287,6 +294,7 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
struct sock_reuseport *reuseport_cb;
struct inet_bind_hashbucket *head2;
struct inet_bind2_bucket *tb2;
+ bool conflict = false;
bool reuseport_cb_ok;
rcu_read_lock();
@@ -299,18 +307,20 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
spin_lock(&head2->lock);
- inet_bind_bucket_for_each(tb2, &head2->chain)
- if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
- break;
+ inet_bind_bucket_for_each(tb2, &head2->chain) {
+ if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
+ continue;
- if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
- reuseport_ok)) {
- spin_unlock(&head2->lock);
- return true;
+ if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
+ continue;
+
+ conflict = true;
+ break;
}
spin_unlock(&head2->lock);
- return false;
+
+ return conflict;
}
/*
@@ -771,6 +781,20 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
+void inet_csk_clear_xmit_timers_sync(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ /* Ongoing timer handlers need to acquire the socket lock. */
+ sock_not_owned_by_me(sk);
+
+ icsk->icsk_pending = icsk->icsk_ack.pending = 0;
+
+ sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer);
+ sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
+ sk_stop_timer_sync(sk, &sk->sk_timer);
+}
+
void inet_csk_delete_keepalive_timer(struct sock *sk)
{
sk_stop_timer(sk, &sk->sk_timer);
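inet_csk_clear_xmit_timers_sync() above differs from the non-sync variant only in using sk_stop_timer_sync(), which waits for a concurrently executing timer handler to finish; that is also why the sock_not_owned_by_me() assertion is there: the handlers take the socket lock, so a caller holding it would deadlock. For reference, the helper it leans on is roughly:

        void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
        {
                if (del_timer_sync(timer)) /* blocks until the handler exits */
                        __sock_put(sk);    /* drop the timer's socket ref    */
        }

The tcp.c hunk further down calls the new function only for kernel sockets (!sk->sk_net_refcnt), so no armed timer can outlive its netns.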
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 7072fc0783ef..c88c9034d630 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -24,6 +24,8 @@
#include <net/ip.h>
#include <net/ipv6.h>
+#include "../core/sock_destructor.h"
+
/* Use skb->cb to track consecutive/adjacent fragments coming at
* the end of the queue. Nodes in the rb-tree queue will
* contain "runs" of one or more adjacent fragments.
@@ -39,6 +41,7 @@ struct ipfrag_skb_cb {
};
struct sk_buff *next_frag;
int frag_run_len;
+ int ip_defrag_offset;
};
#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
@@ -396,12 +399,12 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
*/
if (!last)
fragrun_create(q, skb); /* First fragment. */
- else if (last->ip_defrag_offset + last->len < end) {
+ else if (FRAG_CB(last)->ip_defrag_offset + last->len < end) {
/* This is the common case: skb goes to the end. */
/* Detect and discard overlaps. */
- if (offset < last->ip_defrag_offset + last->len)
+ if (offset < FRAG_CB(last)->ip_defrag_offset + last->len)
return IPFRAG_OVERLAP;
- if (offset == last->ip_defrag_offset + last->len)
+ if (offset == FRAG_CB(last)->ip_defrag_offset + last->len)
fragrun_append_to_last(q, skb);
else
fragrun_create(q, skb);
@@ -418,13 +421,13 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
parent = *rbn;
curr = rb_to_skb(parent);
- curr_run_end = curr->ip_defrag_offset +
+ curr_run_end = FRAG_CB(curr)->ip_defrag_offset +
FRAG_CB(curr)->frag_run_len;
- if (end <= curr->ip_defrag_offset)
+ if (end <= FRAG_CB(curr)->ip_defrag_offset)
rbn = &parent->rb_left;
else if (offset >= curr_run_end)
rbn = &parent->rb_right;
- else if (offset >= curr->ip_defrag_offset &&
+ else if (offset >= FRAG_CB(curr)->ip_defrag_offset &&
end <= curr_run_end)
return IPFRAG_DUP;
else
@@ -438,7 +441,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
rb_insert_color(&skb->rbnode, &q->rb_fragments);
}
- skb->ip_defrag_offset = offset;
+ FRAG_CB(skb)->ip_defrag_offset = offset;
return IPFRAG_OK;
}
@@ -448,13 +451,28 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
struct sk_buff *parent)
{
struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
- struct sk_buff **nextp;
+ void (*destructor)(struct sk_buff *);
+ unsigned int orig_truesize = 0;
+ struct sk_buff **nextp = NULL;
+ struct sock *sk = skb->sk;
int delta;
+ if (sk && is_skb_wmem(skb)) {
+ /* TX: skb->sk might have been passed as argument to
+ * dst->output and must remain valid until tx completes.
+ *
+ * Move sk to reassembled skb and fix up wmem accounting.
+ */
+ orig_truesize = skb->truesize;
+ destructor = skb->destructor;
+ }
+
if (head != skb) {
fp = skb_clone(skb, GFP_ATOMIC);
- if (!fp)
- return NULL;
+ if (!fp) {
+ head = skb;
+ goto out_restore_sk;
+ }
FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
if (RB_EMPTY_NODE(&skb->rbnode))
FRAG_CB(parent)->next_frag = fp;
@@ -463,6 +481,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
&q->rb_fragments);
if (q->fragments_tail == skb)
q->fragments_tail = fp;
+
+ if (orig_truesize) {
+ /* prevent skb_morph from releasing sk */
+ skb->sk = NULL;
+ skb->destructor = NULL;
+ }
skb_morph(skb, head);
FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
rb_replace_node(&head->rbnode, &skb->rbnode,
@@ -470,13 +494,13 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
consume_skb(head);
head = skb;
}
- WARN_ON(head->ip_defrag_offset != 0);
+ WARN_ON(FRAG_CB(head)->ip_defrag_offset != 0);
delta = -head->truesize;
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
- return NULL;
+ goto out_restore_sk;
delta += head->truesize;
if (delta)
@@ -492,7 +516,7 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
clone = alloc_skb(0, GFP_ATOMIC);
if (!clone)
- return NULL;
+ goto out_restore_sk;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
@@ -509,6 +533,21 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
nextp = &skb_shinfo(head)->frag_list;
}
+out_restore_sk:
+ if (orig_truesize) {
+ int ts_delta = head->truesize - orig_truesize;
+
+ /* if this reassembled skb is fragmented later,
+ * fraglist skbs will get skb->sk assigned from head->sk,
+ * and each frag skb will be released via sock_wfree.
+ *
+ * Update sk_wmem_alloc.
+ */
+ head->sk = sk;
+ head->destructor = destructor;
+ refcount_add(ts_delta, &sk->sk_wmem_alloc);
+ }
+
return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);
@@ -516,6 +555,8 @@ EXPORT_SYMBOL(inet_frag_reasm_prepare);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
void *reasm_data, bool try_coalesce)
{
+ struct sock *sk = is_skb_wmem(head) ? head->sk : NULL;
+ const unsigned int head_truesize = head->truesize;
struct sk_buff **nextp = reasm_data;
struct rb_node *rbn;
struct sk_buff *fp;
@@ -579,6 +620,9 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
head->prev = NULL;
head->tstamp = q->stamp;
head->mono_delivery_time = q->mono_delivery_time;
+
+ if (sk)
+ refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(inet_frag_reasm_finish);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a4941f53b523..fb947d1613fe 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -384,6 +384,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
}
skb_dst_drop(skb);
+ skb_orphan(skb);
return -EINPROGRESS;
insert_error:
@@ -487,7 +488,6 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
struct ipq *qp;
__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
- skb_orphan(skb);
/* Lookup (or create) queue header */
qp = ip_find(net, ip_hdr(skb), user, vif);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 7b16c211b904..57ddcd8c62f6 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -280,8 +280,13 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
tpi->flags | TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0);
} else {
+ if (unlikely(!pskb_may_pull(skb,
+ gre_hdr_len + sizeof(*ershdr))))
+ return PACKET_REJECT;
+
ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
ver = ershdr->ver;
+ iph = ip_hdr(skb);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
tpi->flags | TUNNEL_KEY,
iph->saddr, iph->daddr, tpi->key);
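Two details in the erspan_rcv() hunk above: the ERSPAN base header is guaranteed to be in the linear area before it is dereferenced, and iph is recomputed afterwards. The refetch matters because pskb_may_pull() may reallocate skb->head, leaving pointers computed earlier dangling:

        if (unlikely(!pskb_may_pull(skb, gre_hdr_len + sizeof(*ershdr))))
                return PACKET_REJECT;

        ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
        iph = ip_hdr(skb); /* recompute: the pull may have moved skb->head */

The ip6erspan_rcv() hunk later in this section applies the same pskb_may_pull() guard on the IPv6 side.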
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 8f6e950163a7..1b991b889506 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -329,6 +329,7 @@ config NFT_COMPAT_ARP
config IP_NF_ARPFILTER
tristate "arptables-legacy packet filtering support"
select IP_NF_ARPTABLES
+ select NETFILTER_FAMILY_ARP
depends on NETFILTER_XTABLES
help
ARP packet filtering defines a table `filter', which has a series of
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 2407066b0fec..14365b20f1c5 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -956,6 +956,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct arpt_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -964,6 +966,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1254,6 +1258,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct arpt_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1262,6 +1268,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
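The new length checks in the ebtables and {arp,ip,ip6}_tables do_replace()/update_counters() paths all follow one pattern: reject a user buffer shorter than the fixed header before copying it, then verify that the header-declared payload actually fits in the remaining bytes. The widening casts are the important part, since tmp.size is a user-controlled unsigned int; a hypothetical illustration of the wraparound they prevent:

        unsigned int len  = 64;           /* setsockopt() length, small */
        unsigned int size = UINT_MAX - 8; /* attacker-chosen tmp.size   */

        /* 32-bit arithmetic: size + sizeof(tmp) wraps to a small value,
         * so a naive "len < size + sizeof(tmp)" check can pass.
         * 64-bit arithmetic: (u64)size + sizeof(tmp) cannot wrap, so
         * "(u64)len < (u64)size + sizeof(tmp)" correctly rejects it.
         */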
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 7da1df4997d0..fe89a056eb06 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1108,6 +1108,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct ipt_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1116,6 +1118,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1492,6 +1496,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct ipt_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1500,6 +1506,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 74928a9d1aa4..535856b0f0ed 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -768,8 +768,10 @@ static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
struct net *net = nh->net;
int err;
- if (nexthop_notifiers_is_empty(net))
+ if (nexthop_notifiers_is_empty(net)) {
+ *hw_stats_used = false;
return 0;
+ }
err = nh_notifier_grp_hw_stats_init(&info, nh);
if (err)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c8f76f56dc16..b814fdab19f7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -926,13 +926,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
++peer->n_redirects;
-#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (log_martians &&
+ if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
peer->n_redirects == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
&ip_hdr(skb)->saddr, inet_iif(skb),
&ip_hdr(skb)->daddr, &gw);
-#endif
}
out_put_peer:
inet_putpeer(peer);
@@ -2168,6 +2166,9 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
int err = -EINVAL;
u32 tag = 0;
+ if (!in_dev)
+ return -EINVAL;
+
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d20b62d52171..e767721b3a58 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2931,6 +2931,8 @@ void tcp_close(struct sock *sk, long timeout)
lock_sock(sk);
__tcp_close(sk, timeout);
release_sock(sk);
+ if (!sk->sk_net_refcnt)
+ inet_csk_clear_xmit_timers_sync(sk);
sock_put(sk);
}
EXPORT_SYMBOL(tcp_close);
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 3afeeb68e8a7..781b67a52571 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -1068,6 +1068,7 @@ void tcp_ao_connect_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_ao_info *ao_info;
+ struct hlist_node *next;
union tcp_ao_addr *addr;
struct tcp_ao_key *key;
int family, l3index;
@@ -1090,7 +1091,7 @@ void tcp_ao_connect_init(struct sock *sk)
l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
sk->sk_bound_dev_if);
- hlist_for_each_entry_rcu(key, &ao_info->head, node) {
+ hlist_for_each_entry_safe(key, next, &ao_info->head, node) {
if (!tcp_ao_key_cmp(key, l3index, addr, key->prefixlen, family, -1, -1))
continue;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 661d0e0d273f..420905be5f30 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -582,6 +582,13 @@ static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
}
DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
+EXPORT_SYMBOL(udp_encap_needed_key);
+
+#if IS_ENABLED(CONFIG_IPV6)
+DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+EXPORT_SYMBOL(udpv6_encap_needed_key);
+#endif
+
void udp_encap_enable(void)
{
static_branch_inc(&udp_encap_needed_key);
@@ -1116,16 +1123,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_controllen) {
err = udp_cmsg_send(sk, msg, &ipc.gso_size);
- if (err > 0)
+ if (err > 0) {
err = ip_cmsg_send(sk, msg, &ipc,
sk->sk_family == AF_INET6);
+ connected = 0;
+ }
if (unlikely(err < 0)) {
kfree(ipc.opt);
return err;
}
if (ipc.opt)
free = 1;
- connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index b9880743765c..3498dd1d0694 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -449,8 +449,9 @@ static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
NAPI_GRO_CB(p)->count++;
p->data_len += skb->len;
- /* sk owenrship - if any - completely transferred to the aggregated packet */
+ /* sk ownership - if any - completely transferred to the aggregated packet */
skb->destructor = NULL;
+ skb->sk = NULL;
p->truesize += skb->truesize;
p->len += skb->len;
@@ -551,11 +552,19 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
unsigned int off = skb_gro_offset(skb);
int flush = 1;
- /* we can do L4 aggregation only if the packet can't land in a tunnel
- * otherwise we could corrupt the inner stream
+ /* We can do L4 aggregation only if the packet can't land in a tunnel,
+ * otherwise we could corrupt the inner stream. Detecting such packets
+ * cannot be foolproof and the aggregation might still happen in some
+ * cases. Such packets should be caught in udp_unexpected_gso later.
*/
NAPI_GRO_CB(skb)->is_flist = 0;
if (!sk || !udp_sk(sk)->gro_receive) {
+ /* If the packet was locally encapsulated in a UDP tunnel that
+ * wasn't detected above, do not GRO.
+ */
+ if (skb->encapsulation)
+ goto out;
+
if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
@@ -719,13 +728,7 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
- skb->csum_level++;
- } else {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = 0;
- }
+ __skb_incr_checksum_unnecessary(skb);
return 0;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 247bd4d8ee45..779aa6ecdd49 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2091,9 +2091,10 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
if (ipv6_addr_equal(&ifp->addr, addr)) {
if (!dev || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
- result = ifp;
- in6_ifa_hold(ifp);
- break;
+ if (in6_ifa_hold_safe(ifp)) {
+ result = ifp;
+ break;
+ }
}
}
}
@@ -5416,10 +5417,11 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
err = 0;
if (fillargs.ifindex) {
- err = -ENODEV;
dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
- if (!dev)
+ if (!dev) {
+ err = -ENODEV;
goto done;
+ }
idev = __in6_dev_get(dev);
if (idev)
err = in6_dump_addrs(idev, skb, cb,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5c558dc1c683..c1f62352a481 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -651,19 +651,19 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
if (!w) {
/* New dump:
*
- * 1. hook callback destructor.
- */
- cb->args[3] = (long)cb->done;
- cb->done = fib6_dump_done;
-
- /*
- * 2. allocate and initialize walker.
+ * 1. allocate and initialize walker.
*/
w = kzalloc(sizeof(*w), GFP_ATOMIC);
if (!w)
return -ENOMEM;
w->func = fib6_dump_node;
cb->args[2] = (long)w;
+
+ /* 2. hook callback destructor.
+ */
+ cb->args[3] = (long)cb->done;
+ cb->done = fib6_dump_done;
+
}
arg.skb = skb;
@@ -1385,7 +1385,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
struct nl_info *info, struct netlink_ext_ack *extack)
{
struct fib6_table *table = rt->fib6_table;
- struct fib6_node *fn, *pn = NULL;
+ struct fib6_node *fn;
+#ifdef CONFIG_IPV6_SUBTREES
+ struct fib6_node *pn = NULL;
+#endif
int err = -ENOMEM;
int allow_create = 1;
int replace_required = 0;
@@ -1409,9 +1412,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
goto out;
}
+#ifdef CONFIG_IPV6_SUBTREES
pn = fn;
-#ifdef CONFIG_IPV6_SUBTREES
if (rt->fib6_src.plen) {
struct fib6_node *sn;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ca7e77e84283..c89aef524df9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -528,6 +528,9 @@ static int ip6erspan_rcv(struct sk_buff *skb,
struct ip6_tnl *tunnel;
u8 ver;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
+ return PACKET_REJECT;
+
ipv6h = ipv6_hdr(skb);
ershdr = (struct erspan_base_hdr *)skb->data;
ver = ershdr->ver;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index fd9f049d6d41..131f7bb2110d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1125,6 +1125,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct ip6t_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1133,6 +1135,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1501,6 +1505,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
void *loc_cpu_entry;
struct ip6t_entry *iter;
+ if (len < sizeof(tmp))
+ return -EINVAL;
if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1509,6 +1515,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 1a51a44571c3..d0dcbaca1994 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -294,6 +294,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
}
skb_dst_drop(skb);
+ skb_orphan(skb);
return -EINPROGRESS;
insert_error:
@@ -469,7 +470,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
- skb_orphan(skb);
fq = fq_find(net, fhdr->identification, user, hdr,
skb->dev ? skb->dev->ifindex : 0);
if (fq == NULL) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 7c1e6469d091..1a4cccdd40c9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -447,7 +447,7 @@ csum_copy_err:
goto try_again;
}
-DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
static_branch_inc(&udpv6_encap_needed_key);
@@ -1474,9 +1474,11 @@ do_udp_sendmsg:
ipc6.opt = opt;
err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
- if (err > 0)
+ if (err > 0) {
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
&ipc6);
+ connected = false;
+ }
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -1488,7 +1490,6 @@ do_udp_sendmsg:
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
- connected = false;
}
if (!opt) {
opt = txopt_get(np);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 312bcaeea96f..bbd347de00b4 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -174,13 +174,7 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
- skb->csum_level++;
- } else {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = 0;
- }
+ __skb_incr_checksum_unnecessary(skb);
return 0;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index f03452dc716d..f67c1d021812 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2199,15 +2199,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- sta->sdata->u.vlan.sta) {
- ieee80211_clear_fast_rx(sta);
+ sta->sdata->u.vlan.sta)
RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
- }
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
ieee80211_vif_dec_num_mcast(sta->sdata);
sta->sdata = vlansdata;
+ ieee80211_check_fast_rx(sta);
ieee80211_check_fast_xmit(sta);
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 80e4b9784131..ccacaed32817 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -797,6 +797,7 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *curr_ctx = NULL;
+ bool new_idle;
int ret = 0;
if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_NAN))
@@ -829,8 +830,6 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
out:
rcu_assign_pointer(link->conf->chanctx_conf, conf);
- sdata->vif.cfg.idle = !conf;
-
if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) {
ieee80211_recalc_chanctx_chantype(local, curr_ctx);
ieee80211_recalc_smps_chanctx(local, curr_ctx);
@@ -843,9 +842,27 @@ out:
ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
}
- if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
- sdata->vif.type != NL80211_IFTYPE_MONITOR)
- ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
+ if (conf) {
+ new_idle = false;
+ } else {
+ struct ieee80211_link_data *tmp;
+
+ new_idle = true;
+ for_each_sdata_link(local, tmp) {
+ if (rcu_access_pointer(tmp->conf->chanctx_conf)) {
+ new_idle = false;
+ break;
+ }
+ }
+ }
+
+ if (new_idle != sdata->vif.cfg.idle) {
+ sdata->vif.cfg.idle = new_idle;
+
+ if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+ sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
+ }
ieee80211_check_fast_xmit_iface(sdata);
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index 49da401c5340..35a8ba25fa57 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -158,7 +158,7 @@ do { \
_sdata_dbg(print, sdata, "[link %d] " fmt, \
link_id, ##__VA_ARGS__); \
else \
- _sdata_dbg(1, sdata, fmt, ##__VA_ARGS__); \
+ _sdata_dbg(print, sdata, fmt, ##__VA_ARGS__); \
} while (0)
#define link_dbg(link, fmt, ...) \
_link_id_dbg(1, (link)->sdata, (link)->link_id, \
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b6fead612b66..bd507d6b65e3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -131,7 +131,7 @@ struct ieee80211_bss {
};
/**
- * enum ieee80211_corrupt_data_flags - BSS data corruption flags
+ * enum ieee80211_bss_corrupt_data_flags - BSS data corruption flags
* @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted
* @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted
*
@@ -144,7 +144,7 @@ enum ieee80211_bss_corrupt_data_flags {
};
/**
- * enum ieee80211_valid_data_flags - BSS valid data flags
+ * enum ieee80211_bss_valid_data_flags - BSS valid data flags
* @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
* @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
* @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 32475da98d73..cbc9b5e40cb3 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -747,6 +747,9 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 ctrl_flags)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_mesh_fast_tx_key key = {
+ .type = MESH_FAST_TX_TYPE_LOCAL
+ };
struct ieee80211_mesh_fast_tx *entry;
struct ieee80211s_hdr *meshhdr;
u8 sa[ETH_ALEN] __aligned(2);
@@ -782,7 +785,10 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
return false;
}
- entry = mesh_fast_tx_get(sdata, skb->data);
+ ether_addr_copy(key.addr, skb->data);
+ if (!ether_addr_equal(skb->data + ETH_ALEN, sdata->vif.addr))
+ key.type = MESH_FAST_TX_TYPE_PROXIED;
+ entry = mesh_fast_tx_get(sdata, &key);
if (!entry)
return false;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index d913ce7ba72e..3f9664e4e00c 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -135,9 +135,38 @@ struct mesh_path {
#define MESH_FAST_TX_CACHE_TIMEOUT 8000 /* msecs */
/**
+ * enum ieee80211_mesh_fast_tx_type - cached mesh fast tx entry type
+ *
+ * @MESH_FAST_TX_TYPE_LOCAL: tx from the local vif address as SA
+ * @MESH_FAST_TX_TYPE_PROXIED: local tx with a different SA (e.g. bridged)
+ * @MESH_FAST_TX_TYPE_FORWARDED: forwarded from a different mesh point
+ * @NUM_MESH_FAST_TX_TYPE: number of entry types
+ */
+enum ieee80211_mesh_fast_tx_type {
+ MESH_FAST_TX_TYPE_LOCAL,
+ MESH_FAST_TX_TYPE_PROXIED,
+ MESH_FAST_TX_TYPE_FORWARDED,
+
+ /* must be last */
+ NUM_MESH_FAST_TX_TYPE
+};
+
+
+/**
+ * struct ieee80211_mesh_fast_tx_key - cached mesh fast tx entry key
+ *
+ * @addr: The Ethernet DA for this entry
+ * @type: cache entry type
+ */
+struct ieee80211_mesh_fast_tx_key {
+ u8 addr[ETH_ALEN] __aligned(2);
+ u16 type;
+};
+
+/**
* struct ieee80211_mesh_fast_tx - cached mesh fast tx entry
* @rhash: rhashtable pointer
- * @addr_key: The Ethernet DA which is the key for this entry
+ * @key: the lookup key for this cache entry
* @fast_tx: base fast_tx data
* @hdr: cached mesh and rfc1042 headers
* @hdrlen: length of mesh + rfc1042
@@ -148,7 +177,7 @@ struct mesh_path {
*/
struct ieee80211_mesh_fast_tx {
struct rhash_head rhash;
- u8 addr_key[ETH_ALEN] __aligned(2);
+ struct ieee80211_mesh_fast_tx_key key;
struct ieee80211_fast_tx fast_tx;
u8 hdr[sizeof(struct ieee80211s_hdr) + sizeof(rfc1042_header)];
@@ -334,7 +363,8 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
struct ieee80211_mesh_fast_tx *
-mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr);
+mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mesh_fast_tx_key *key);
bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 ctrl_flags);
void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
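One subtlety in the new lookup key above: rhashtable hashes the key as raw bytes (the mesh_pathtbl.c hunk below sets key_len = sizeof_field(..., key)), so the struct must not contain uninitialized padding. The layout is naturally packed, 6 bytes of address plus a u16 type, and the call sites build it with designated initializers, which zero the remainder, as in the rx.c forwarding path later in this section:

        struct ieee80211_mesh_fast_tx_key key = {
                .type = MESH_FAST_TX_TYPE_FORWARDED, /* addr[] zeroed here */
        };

        ether_addr_copy(key.addr, mesh_hdr->eaddr1);
        entry = mesh_fast_tx_get(sdata, &key);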
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 91b55d6a68b9..a6b62169f084 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -37,8 +37,8 @@ static const struct rhashtable_params mesh_rht_params = {
static const struct rhashtable_params fast_tx_rht_params = {
.nelem_hint = 10,
.automatic_shrinking = true,
- .key_len = ETH_ALEN,
- .key_offset = offsetof(struct ieee80211_mesh_fast_tx, addr_key),
+ .key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key),
+ .key_offset = offsetof(struct ieee80211_mesh_fast_tx, key),
.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
.hashfn = mesh_table_hash,
};
@@ -431,20 +431,21 @@ static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
}
struct ieee80211_mesh_fast_tx *
-mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mesh_fast_tx_key *key)
{
struct ieee80211_mesh_fast_tx *entry;
struct mesh_tx_cache *cache;
cache = &sdata->u.mesh.tx_cache;
- entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
+ entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
if (!entry)
return NULL;
if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
mpath_expired(entry->mpath)) {
spin_lock_bh(&cache->walk_lock);
- entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
+ entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
if (entry)
mesh_fast_tx_entry_free(cache, entry);
spin_unlock_bh(&cache->walk_lock);
@@ -489,18 +490,24 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
if (!sta)
return;
+ build.key.type = MESH_FAST_TX_TYPE_LOCAL;
if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
/* This is required to keep the mppath alive */
mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
if (!mppath)
return;
build.mppath = mppath;
+ if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr))
+ build.key.type = MESH_FAST_TX_TYPE_PROXIED;
} else if (ieee80211_has_a4(hdr->frame_control)) {
mppath = mpath;
} else {
return;
}
+ if (!ether_addr_equal(hdr->addr4, sdata->vif.addr))
+ build.key.type = MESH_FAST_TX_TYPE_FORWARDED;
+
/* rate limit, in case fast xmit can't be enabled */
if (mppath->fast_tx_check == jiffies)
return;
@@ -547,7 +554,7 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
}
}
- memcpy(build.addr_key, mppath->dst, ETH_ALEN);
+ memcpy(build.key.addr, mppath->dst, ETH_ALEN);
build.timestamp = jiffies;
build.fast_tx.band = info->band;
build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
@@ -646,12 +653,18 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
+ struct ieee80211_mesh_fast_tx_key key = {};
struct ieee80211_mesh_fast_tx *entry;
+ int i;
+ ether_addr_copy(key.addr, addr);
spin_lock_bh(&cache->walk_lock);
- entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
- if (entry)
- mesh_fast_tx_entry_free(cache, entry);
+ for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) {
+ key.type = i;
+ entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params);
+ if (entry)
+ mesh_fast_tx_entry_free(cache, entry);
+ }
spin_unlock_bh(&cache->walk_lock);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 47a2cba8313f..3bbb216a0fc8 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -616,7 +616,6 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
.from_ap = true,
.start = ies->data,
.len = ies->len,
- .mode = conn->mode,
};
struct ieee802_11_elems *elems;
struct ieee80211_supported_band *sband;
@@ -625,6 +624,7 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
int ret;
again:
+ parse_params.mode = conn->mode;
elems = ieee802_11_parse_elems_full(&parse_params);
if (!elems)
return ERR_PTR(-ENOMEM);
@@ -632,15 +632,21 @@ again:
ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info,
elems, false, conn, &ap_chandef);
- mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
- cbss->bssid, ieee80211_conn_mode_str(ap_mode));
-
/* this should be impossible since parsing depends on our mode */
if (WARN_ON(ap_mode > conn->mode)) {
ret = -EINVAL;
goto free;
}
+ if (conn->mode != ap_mode) {
+ conn->mode = ap_mode;
+ kfree(elems);
+ goto again;
+ }
+
+ mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
+ cbss->bssid, ieee80211_conn_mode_str(ap_mode));
+
sband = sdata->local->hw.wiphy->bands[channel->band];
switch (channel->band) {
@@ -691,7 +697,6 @@ again:
break;
}
- conn->mode = ap_mode;
chanreq->oper = ap_chandef;
/* wider-bandwidth OFDMA is only done in EHT */
@@ -753,8 +758,10 @@ again:
}
/* the mode can only decrease, so this must terminate */
- if (ap_mode != conn->mode)
+ if (ap_mode != conn->mode) {
+ kfree(elems);
goto again;
+ }
mlme_link_id_dbg(sdata, link_id,
"connecting with %s mode, max bandwidth %d MHz\n",
@@ -5812,7 +5819,7 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
*/
if (control &
IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
- link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos);
+ link_removal_timeout[link_id] = get_unaligned_le16(pos);
}
removed_links &= sdata->vif.valid_links;
@@ -5837,8 +5844,11 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
continue;
}
- link_delay = link_conf->beacon_int *
- link_removal_timeout[link_id];
+ if (link_removal_timeout[link_id] < 1)
+ link_delay = 0;
+ else
+ link_delay = link_conf->beacon_int *
+ (link_removal_timeout[link_id] - 1);
if (!delay)
delay = link_delay;
@@ -5874,6 +5884,15 @@ static int ieee80211_ttlm_set_links(struct ieee80211_sub_if_data *sdata,
}
if (sdata->vif.active_links != active_links) {
+ /* usable links are affected when active_links are changed,
+ * so notify the driver about the status change
+ */
+ changed |= BSS_CHANGED_MLD_VALID_LINKS;
+ active_links &= sdata->vif.active_links;
+ if (!active_links)
+ active_links =
+ BIT(__ffs(sdata->vif.valid_links &
+ ~dormant_links));
ret = ieee80211_set_active_links(&sdata->vif, active_links);
if (ret) {
sdata_info(sdata, "Failed to set TTLM active links\n");
@@ -5888,7 +5907,6 @@ static int ieee80211_ttlm_set_links(struct ieee80211_sub_if_data *sdata,
goto out;
}
- changed |= BSS_CHANGED_MLD_VALID_LINKS;
sdata->vif.suspended_links = suspended_links;
if (sdata->vif.suspended_links)
changed |= BSS_CHANGED_MLD_TTLM;
@@ -6185,7 +6203,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
link->u.mgd.dtim_period = elems->dtim_period;
link->u.mgd.have_beacon = true;
ifmgd->assoc_data->need_beacon = false;
- if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
+ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
+ !ieee80211_is_s1g_beacon(hdr->frame_control)) {
link->conf->sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
link->conf->sync_device_ts =
@@ -7652,7 +7671,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata,
"failed to insert STA entry for the AP (error %d)\n",
err);
- goto out_err;
+ goto out_release_chan;
}
} else
WARN_ON_ONCE(!ether_addr_equal(link->u.mgd.bssid, cbss->bssid));
@@ -7663,8 +7682,9 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
return 0;
+out_release_chan:
+ ieee80211_link_release_channel(link);
out_err:
- ieee80211_link_release_channel(&sdata->deflink);
ieee80211_vif_set_links(sdata, 0, 0);
return err;
}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 23404b275457..4dc1def69548 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -877,6 +877,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
struct ieee80211_sub_if_data *sdata;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_supported_band *sband;
+ u32 mask = ~0;
rate_control_fill_sta_table(sta, info, dest, max_rates);
@@ -889,9 +890,12 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
if (ieee80211_is_tx_data(skb))
rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
+ if (!(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX))
+ mask = sdata->rc_rateidx_mask[info->band];
+
if (dest[0].idx < 0)
__rate_control_send_low(&sdata->local->hw, sband, sta, info,
- sdata->rc_rateidx_mask[info->band]);
+ mask);
if (sta)
rate_fixup_ratelist(vif, sband, info, dest, max_rates);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c1f850138405..6e24864f9a40 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2763,7 +2763,10 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int hdrlen)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee80211_mesh_fast_tx *entry = NULL;
+ struct ieee80211_mesh_fast_tx_key key = {
+ .type = MESH_FAST_TX_TYPE_FORWARDED
+ };
+ struct ieee80211_mesh_fast_tx *entry;
struct ieee80211s_hdr *mesh_hdr;
struct tid_ampdu_tx *tid_tx;
struct sta_info *sta;
@@ -2772,9 +2775,13 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth));
if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
- entry = mesh_fast_tx_get(sdata, mesh_hdr->eaddr1);
+ ether_addr_copy(key.addr, mesh_hdr->eaddr1);
else if (!(mesh_hdr->flags & MESH_FLAGS_AE))
- entry = mesh_fast_tx_get(sdata, skb->data);
+ ether_addr_copy(key.addr, skb->data);
+ else
+ return false;
+
+ entry = mesh_fast_tx_get(sdata, &key);
if (!entry)
return false;
@@ -3780,6 +3787,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
}
break;
case WLAN_CATEGORY_PROTECTED_EHT:
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_req.action_code))
+ break;
+
switch (mgmt->u.action.u.ttlm_req.action_code) {
case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
if (sdata->vif.type != NL80211_IFTYPE_STATION)
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 0429e59ba387..73850312580f 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -648,6 +648,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
cpu_to_le16(IEEE80211_SN_TO_SEQ(sn));
}
IEEE80211_SKB_CB(skb)->flags |= tx_flags;
+ IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_SCAN_TX;
ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
}
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 6bf223e6cd1a..cfd0a62d0152 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -698,11 +698,16 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
txrc.bss_conf = &tx->sdata->vif.bss_conf;
txrc.skb = tx->skb;
txrc.reported_rate.idx = -1;
- txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
- if (tx->sdata->rc_has_mcs_mask[info->band])
- txrc.rate_idx_mcs_mask =
- tx->sdata->rc_rateidx_mcs_mask[info->band];
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) {
+ txrc.rate_idx_mask = ~0;
+ } else {
+ txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
+
+ if (tx->sdata->rc_has_mcs_mask[info->band])
+ txrc.rate_idx_mcs_mask =
+ tx->sdata->rc_rateidx_mcs_mask[info->band];
+ }
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 3a1967bc7bad..7e74b812e366 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3937,8 +3937,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
mptcp_set_state(newsk, TCP_CLOSE);
}
} else {
- MPTCP_INC_STATS(sock_net(ssk),
- MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
tcpfallback:
newsk->sk_kern_sock = kern;
lock_sock(newsk);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index dcd1c76d2a3b..73fdf423de44 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -1493,6 +1493,10 @@ int mptcp_set_rcvlowat(struct sock *sk, int val)
struct mptcp_subflow_context *subflow;
int space, cap;
+ /* bpf can land here with a wrong sk type */
+ if (sk->sk_protocol == IPPROTO_TCP)
+ return -EINVAL;
+
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
cap = sk->sk_rcvbuf >> 1;
else
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 1626dd20c68f..6042a47da61b 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -905,6 +905,8 @@ dispose_child:
return child;
fallback:
+ if (fallback)
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
mptcp_subflow_drop_ctx(child);
return child;
}
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index a0921adc31a9..1e689c714127 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -126,7 +126,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
if (sctph->source != cp->vport || payload_csum ||
skb->ip_summed == CHECKSUM_PARTIAL) {
sctph->source = cp->vport;
- sctp_nat_csum(skb, sctph, sctphoff);
+ if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+ sctp_nat_csum(skb, sctph, sctphoff);
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -174,7 +175,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
(skb->ip_summed == CHECKSUM_PARTIAL &&
!(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
sctph->dest = cp->dport;
- sctp_nat_csum(skb, sctph, sctphoff);
+ if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+ sctp_nat_csum(skb, sctph, sctphoff);
} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
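
The condition added in both handlers reads as "not an SCTP GSO aggregate": for GSO SCTP skbs the CRC32c is computed per segment during segmentation, so recomputing it here over the aggregate would produce a checksum that is wrong for every resulting segment. A tiny sketch of the predicate, with illustrative names standing in for the skb helpers:

#include <stdbool.h>

/* Software checksum is needed unless the skb is both GSO and SCTP-GSO;
 * in that case segmentation checksums each segment on its own later. */
static bool needs_sw_csum(bool is_gso, bool is_gso_sctp)
{
	return !is_gso || !is_gso_sctp;	/* == !(is_gso && is_gso_sctp) */
}
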
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 9505f9d188ff..6eef15648b7b 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -21,7 +21,8 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
proto = veth->h_vlan_encapsulated_proto;
break;
case htons(ETH_P_PPP_SES):
- proto = nf_flow_pppoe_proto(skb);
+ if (!nf_flow_pppoe_proto(skb, &proto))
+ return NF_ACCEPT;
break;
default:
proto = skb->protocol;
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index e45fade76409..5383bed3d3e0 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -157,7 +157,7 @@ static void nf_flow_tuple_encap(struct sk_buff *skb,
tuple->encap[i].proto = skb->protocol;
break;
case htons(ETH_P_PPP_SES):
- phdr = (struct pppoe_hdr *)skb_mac_header(skb);
+ phdr = (struct pppoe_hdr *)skb_network_header(skb);
tuple->encap[i].id = ntohs(phdr->sid);
tuple->encap[i].proto = skb->protocol;
break;
@@ -273,10 +273,11 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
return NF_STOLEN;
}
-static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
+static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
u32 *offset)
{
struct vlan_ethhdr *veth;
+ __be16 inner_proto;
switch (skb->protocol) {
case htons(ETH_P_8021Q):
@@ -287,7 +288,8 @@ static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
}
break;
case htons(ETH_P_PPP_SES):
- if (nf_flow_pppoe_proto(skb) == proto) {
+ if (nf_flow_pppoe_proto(skb, &inner_proto) &&
+ inner_proto == proto) {
*offset += PPPOE_SES_HLEN;
return true;
}
@@ -316,7 +318,7 @@ static void nf_flow_encap_pop(struct sk_buff *skb,
skb_reset_network_header(skb);
break;
case htons(ETH_P_PPP_SES):
- skb->protocol = nf_flow_pppoe_proto(skb);
+ skb->protocol = __nf_flow_pppoe_proto(skb);
skb_pull(skb, PPPOE_SES_HLEN);
skb_reset_network_header(skb);
break;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5fa3d3540c93..167074283ea9 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -594,6 +594,12 @@ static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
struct nft_elem_priv *elem_priv)
{
+ struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
+ nft_set_elem_change_active(ctx->net, set, ext);
nft_setelem_data_deactivate(ctx->net, set, elem_priv);
return 0;
@@ -617,6 +623,7 @@ static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
if (!nft_set_elem_active(ext, genmask))
continue;
+ nft_set_elem_change_active(ctx->net, set, ext);
nft_setelem_data_deactivate(ctx->net, set, catchall->elem);
break;
}
@@ -626,6 +633,7 @@ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
{
struct nft_set_iter iter = {
.genmask = nft_genmask_next(ctx->net),
+ .type = NFT_ITER_UPDATE,
.fn = nft_mapelem_deactivate,
};
@@ -1200,6 +1208,26 @@ static void nf_tables_table_disable(struct net *net, struct nft_table *table)
__NFT_TABLE_F_WAS_AWAKEN | \
__NFT_TABLE_F_WAS_ORPHAN)
+static bool nft_table_pending_update(const struct nft_ctx *ctx)
+{
+ struct nftables_pernet *nft_net = nft_pernet(ctx->net);
+ struct nft_trans *trans;
+
+ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ return true;
+
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
+ if (trans->ctx.table == ctx->table &&
+ ((trans->msg_type == NFT_MSG_NEWCHAIN &&
+ nft_trans_chain_update(trans)) ||
+ (trans->msg_type == NFT_MSG_DELCHAIN &&
+ nft_is_base_chain(trans->ctx.chain))))
+ return true;
+ }
+
+ return false;
+}
+
static int nf_tables_updtable(struct nft_ctx *ctx)
{
struct nft_trans *trans;
@@ -1226,7 +1254,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
return -EOPNOTSUPP;
/* No dormant off/on/off/on games in single transaction */
- if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ if (nft_table_pending_update(ctx))
return -EINVAL;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
@@ -2430,6 +2458,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
struct nft_stats __percpu *stats = NULL;
struct nft_chain_hook hook = {};
+ if (table->flags & __NFT_TABLE_F_UPDATE)
+ return -EINVAL;
+
if (flags & NFT_CHAIN_BINDING)
return -EOPNOTSUPP;
@@ -2631,6 +2662,13 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
}
}
+ if (table->flags & __NFT_TABLE_F_UPDATE &&
+ !list_empty(&hook.list)) {
+ NL_SET_BAD_ATTR(extack, attr);
+ err = -EOPNOTSUPP;
+ goto err_hooks;
+ }
+
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
nft_is_base_chain(chain) &&
!list_empty(&hook.list)) {
@@ -2860,6 +2898,9 @@ static int nft_delchain_hook(struct nft_ctx *ctx,
struct nft_trans *trans;
int err;
+ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ return -EOPNOTSUPP;
+
err = nft_chain_parse_hook(ctx->net, basechain, nla, &chain_hook,
ctx->family, chain->flags, extack);
if (err < 0)
@@ -2944,7 +2985,8 @@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
if (nla[NFTA_CHAIN_HOOK]) {
- if (chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYCHAIN ||
+ chain->flags & NFT_CHAIN_HW_OFFLOAD)
return -EOPNOTSUPP;
if (nft_is_base_chain(chain)) {
@@ -3026,7 +3068,7 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
{
const struct nft_expr_type *type, *candidate = NULL;
- list_for_each_entry(type, &nf_tables_expressions, list) {
+ list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
if (!nla_strcmp(nla, type->name)) {
if (!type->family && !candidate)
candidate = type;
@@ -3058,9 +3100,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
if (nla == NULL)
return ERR_PTR(-EINVAL);
+ rcu_read_lock();
type = __nft_expr_type_get(family, nla);
- if (type != NULL && try_module_get(type->owner))
+ if (type != NULL && try_module_get(type->owner)) {
+ rcu_read_unlock();
return type;
+ }
+ rcu_read_unlock();
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
@@ -3841,6 +3887,9 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
const struct nft_data *data;
int err;
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
*nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
return 0;
@@ -3864,17 +3913,20 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
{
- u8 genmask = nft_genmask_next(ctx->net);
+ struct nft_set_iter dummy_iter = {
+ .genmask = nft_genmask_next(ctx->net),
+ };
struct nft_set_elem_catchall *catchall;
+
struct nft_set_ext *ext;
int ret = 0;
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
- if (!nft_set_elem_active(ext, genmask))
+ if (!nft_set_elem_active(ext, dummy_iter.genmask))
continue;
- ret = nft_setelem_validate(ctx, set, NULL, catchall->elem);
+ ret = nft_setelem_validate(ctx, set, &dummy_iter, catchall->elem);
if (ret < 0)
return ret;
}
@@ -5363,6 +5415,11 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
struct nft_elem_priv *elem_priv)
{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
return nft_setelem_data_validate(ctx, set, elem_priv);
}
@@ -5407,6 +5464,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
}
iter.genmask = nft_genmask_next(ctx->net);
+ iter.type = NFT_ITER_UPDATE;
iter.skip = 0;
iter.count = 0;
iter.err = 0;
@@ -5454,6 +5512,13 @@ static int nft_mapelem_activate(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
struct nft_elem_priv *elem_priv)
{
+ struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+
+ /* called from abort path, reverse check to undo changes. */
+ if (nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
+ nft_clear(ctx->net, ext);
nft_setelem_data_activate(ctx->net, set, elem_priv);
return 0;
@@ -5471,6 +5536,7 @@ static void nft_map_catchall_activate(const struct nft_ctx *ctx,
if (!nft_set_elem_active(ext, genmask))
continue;
+ nft_clear(ctx->net, ext);
nft_setelem_data_activate(ctx->net, set, catchall->elem);
break;
}
@@ -5480,6 +5546,7 @@ static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
{
struct nft_set_iter iter = {
.genmask = nft_genmask_next(ctx->net),
+ .type = NFT_ITER_UPDATE,
.fn = nft_mapelem_activate,
};
@@ -5744,6 +5811,9 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
struct nft_set_dump_args *args;
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
return 0;
@@ -5854,6 +5924,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
args.skb = skb;
args.reset = dump_ctx->reset;
args.iter.genmask = nft_genmask_cur(net);
+ args.iter.type = NFT_ITER_READ;
args.iter.skip = cb->args[0];
args.iter.count = 0;
args.iter.err = 0;
@@ -6593,7 +6664,7 @@ static void nft_setelem_activate(struct net *net, struct nft_set *set,
struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
if (nft_setelem_is_catchall(set, elem_priv)) {
- nft_set_elem_change_active(net, set, ext);
+ nft_clear(net, ext);
} else {
set->ops->activate(net, set, elem_priv);
}
@@ -7152,6 +7223,16 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
}
}
+static int nft_setelem_active_next(const struct net *net,
+ const struct nft_set *set,
+ struct nft_elem_priv *elem_priv)
+{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+ u8 genmask = nft_genmask_next(net);
+
+ return nft_set_elem_active(ext, genmask);
+}
+
static void nft_setelem_data_activate(const struct net *net,
const struct nft_set *set,
struct nft_elem_priv *elem_priv)
@@ -7275,8 +7356,12 @@ static int nft_setelem_flush(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
struct nft_elem_priv *elem_priv)
{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
struct nft_trans *trans;
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
sizeof(struct nft_trans_elem), GFP_ATOMIC);
if (!trans)
@@ -7338,6 +7423,7 @@ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
{
struct nft_set_iter iter = {
.genmask = genmask,
+ .type = NFT_ITER_UPDATE,
.fn = nft_setelem_flush,
};
@@ -7573,7 +7659,7 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
{
const struct nft_object_type *type;
- list_for_each_entry(type, &nf_tables_objects, list) {
+ list_for_each_entry_rcu(type, &nf_tables_objects, list) {
if (type->family != NFPROTO_UNSPEC &&
type->family != family)
continue;
@@ -7589,9 +7675,13 @@ nft_obj_type_get(struct net *net, u32 objtype, u8 family)
{
const struct nft_object_type *type;
+ rcu_read_lock();
type = __nft_obj_type_get(objtype, family);
- if (type != NULL && try_module_get(type->owner))
+ if (type != NULL && try_module_get(type->owner)) {
+ rcu_read_unlock();
return type;
+ }
+ rcu_read_unlock();
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
@@ -8263,11 +8353,12 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
return err;
}
+/* call under rcu_read_lock */
static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family)
{
const struct nf_flowtable_type *type;
- list_for_each_entry(type, &nf_tables_flowtables, list) {
+ list_for_each_entry_rcu(type, &nf_tables_flowtables, list) {
if (family == type->family)
return type;
}
@@ -8279,9 +8370,13 @@ nft_flowtable_type_get(struct net *net, u8 family)
{
const struct nf_flowtable_type *type;
+ rcu_read_lock();
type = __nft_flowtable_type_get(family);
- if (type != NULL && try_module_get(type->owner))
+ if (type != NULL && try_module_get(type->owner)) {
+ rcu_read_unlock();
return type;
+ }
+ rcu_read_unlock();
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
@@ -10182,9 +10277,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
if (nft_trans_chain_update(trans)) {
nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
&nft_trans_chain_hooks(trans));
- nft_netdev_unregister_hooks(net,
- &nft_trans_chain_hooks(trans),
- true);
+ if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) {
+ nft_netdev_unregister_hooks(net,
+ &nft_trans_chain_hooks(trans),
+ true);
+ }
} else {
nft_chain_del(trans->ctx.chain);
nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
@@ -10423,10 +10520,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
struct nft_trans *trans, *next;
LIST_HEAD(set_update_list);
struct nft_trans_elem *te;
+ int err = 0;
if (action == NFNL_ABORT_VALIDATE &&
nf_tables_validate(net) < 0)
- return -EAGAIN;
+ err = -EAGAIN;
list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
list) {
@@ -10460,9 +10558,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
- nft_netdev_unregister_hooks(net,
- &nft_trans_chain_hooks(trans),
- true);
+ if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) {
+ nft_netdev_unregister_hooks(net,
+ &nft_trans_chain_hooks(trans),
+ true);
+ }
free_percpu(nft_trans_chain_stats(trans));
kfree(nft_trans_chain_name(trans));
nft_trans_destroy(trans);
@@ -10554,8 +10654,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
case NFT_MSG_DESTROYSETELEM:
te = (struct nft_trans_elem *)trans->data;
- nft_setelem_data_activate(net, te->set, te->elem_priv);
- nft_setelem_activate(net, te->set, te->elem_priv);
+ if (!nft_setelem_active_next(net, te->set, te->elem_priv)) {
+ nft_setelem_data_activate(net, te->set, te->elem_priv);
+ nft_setelem_activate(net, te->set, te->elem_priv);
+ }
if (!nft_setelem_is_catchall(te->set, te->elem_priv))
te->set->ndeact--;
@@ -10616,12 +10718,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nf_tables_abort_release(trans);
}
- if (action == NFNL_ABORT_AUTOLOAD)
- nf_tables_module_autoload(net);
- else
- nf_tables_module_autoload_cleanup(net);
-
- return 0;
+ return err;
}
static int nf_tables_abort(struct net *net, struct sk_buff *skb,
@@ -10634,6 +10731,17 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
gc_seq = nft_gc_seq_begin(nft_net);
ret = __nf_tables_abort(net, action);
nft_gc_seq_end(nft_net, gc_seq);
+
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+	/* module autoload needs to happen after GC sequence update because it
+	 * temporarily releases and re-acquires the mutex.
+	 */
+ if (action == NFNL_ABORT_AUTOLOAD)
+ nf_tables_module_autoload(net);
+ else
+ nf_tables_module_autoload_cleanup(net);
+
mutex_unlock(&nft_net->commit_mutex);
return ret;
@@ -10737,6 +10845,9 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+ if (!nft_set_elem_active(ext, iter->genmask))
+ return 0;
+
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
*nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
return 0;
@@ -10821,6 +10932,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
continue;
iter.genmask = nft_genmask_next(ctx->net);
+ iter.type = NFT_ITER_UPDATE;
iter.skip = 0;
iter.count = 0;
iter.err = 0;
@@ -11439,9 +11551,10 @@ static void __net_exit nf_tables_exit_net(struct net *net)
gc_seq = nft_gc_seq_begin(nft_net);
- if (!list_empty(&nft_net->commit_list) ||
- !list_empty(&nft_net->module_list))
- __nf_tables_abort(net, NFNL_ABORT_NONE);
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+ if (!list_empty(&nft_net->module_list))
+ nf_tables_module_autoload_cleanup(net);
__nft_release_tables(net);
@@ -11533,6 +11646,7 @@ static void __exit nf_tables_module_exit(void)
unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
nft_chain_filter_fini();
nft_chain_route_fini();
+ nf_tables_trans_destroy_flush_work();
unregister_pernet_subsys(&nf_tables_net_ops);
cancel_work_sync(&trans_gc_work);
cancel_work_sync(&trans_destroy_work);
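
Several hunks above in nf_tables_api.c convert bare list walks (__nft_expr_type_get, __nft_obj_type_get, __nft_flowtable_type_get) to list_for_each_entry_rcu() and wrap the callers in rcu_read_lock(). The invariant is: take the module reference while still inside the RCU read-side section, and only then leave it, so the type cannot be freed between lookup and pinning. A compilable userspace sketch of the pattern; the RCU and refcount helpers are stand-ins, not kernel APIs:

#include <stddef.h>
#include <stdbool.h>

/* Userspace stand-ins so the sketch compiles; in the kernel these are
 * the real RCU primitives and try_module_get(). */
static void rcu_read_lock(void) {}
static void rcu_read_unlock(void) {}

struct obj { int refcnt; };

static struct obj *find_on_rcu_list(void) { return NULL; /* stub */ }
static bool try_get_ref(struct obj *o) { return ++o->refcnt > 0; }

/* Lookup-then-pin: grab the reference before leaving the RCU read-side
 * section (mirrors the nft_*_type_get() changes above). */
static struct obj *lookup_and_pin(void)
{
	struct obj *o;

	rcu_read_lock();
	o = find_on_rcu_list();
	if (o && try_get_ref(o)) {
		rcu_read_unlock();
		return o;
	}
	rcu_read_unlock();
	return NULL;	/* not found, or already being torn down */
}
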
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 274b6f7e6bb5..d170758a1eb5 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -338,7 +338,9 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
return;
if (n > 1) {
- nf_unregister_net_hook(ctx->net, &found->ops);
+ if (!(ctx->chain->table->flags & NFT_TABLE_F_DORMANT))
+ nf_unregister_net_hook(ctx->net, &found->ops);
+
list_del_rcu(&found->list);
kfree_rcu(found, rcu);
return;
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index a0055f510e31..b314ca728a29 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -216,6 +216,7 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
return 0;
iter.genmask = nft_genmask_next(ctx->net);
+ iter.type = NFT_ITER_UPDATE;
iter.skip = 0;
iter.count = 0;
iter.err = 0;
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 32df7a16835d..1caa04619dc6 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -172,7 +172,7 @@ static void nft_bitmap_activate(const struct net *net,
nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 11 state. */
priv->bitmap[idx] |= (genmask << off);
- nft_set_elem_change_active(net, set, &be->ext);
+ nft_clear(net, &be->ext);
}
static void nft_bitmap_flush(const struct net *net,
@@ -222,8 +222,6 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
list_for_each_entry_rcu(be, &priv->list, head) {
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&be->ext, iter->genmask))
- goto cont;
iter->err = iter->fn(ctx, set, iter, &be->priv);
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 6968a3b34236..daa56dda737a 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -199,7 +199,7 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
{
struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv);
- nft_set_elem_change_active(net, set, &he->ext);
+ nft_clear(net, &he->ext);
}
static void nft_rhash_flush(const struct net *net,
@@ -286,8 +286,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&he->ext, iter->genmask))
- goto cont;
iter->err = iter->fn(ctx, set, iter, &he->priv);
if (iter->err < 0)
@@ -599,7 +597,7 @@ static void nft_hash_activate(const struct net *net, const struct nft_set *set,
{
struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv);
- nft_set_elem_change_active(net, set, &he->ext);
+ nft_clear(net, &he->ext);
}
static void nft_hash_flush(const struct net *net,
@@ -652,8 +650,6 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
hlist_for_each_entry_rcu(he, &priv->table[i], node) {
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&he->ext, iter->genmask))
- goto cont;
iter->err = iter->fn(ctx, set, iter, &he->priv);
if (iter->err < 0)
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index df8de5090246..187138afac45 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -1847,7 +1847,7 @@ static void nft_pipapo_activate(const struct net *net,
{
struct nft_pipapo_elem *e = nft_elem_priv_cast(elem_priv);
- nft_set_elem_change_active(net, set, &e->ext);
+ nft_clear(net, &e->ext);
}
/**
@@ -2077,6 +2077,8 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
rules_fx = rules_f0;
nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1;
+
if (!pipapo_match_field(f, start, rules_fx,
match_start, match_end))
break;
@@ -2089,16 +2091,18 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
- }
- if (i == m->field_count) {
- priv->dirty = true;
- pipapo_drop(m, rulemap);
- return;
+ if (last && f->mt[rulemap[i].to].e == e) {
+ priv->dirty = true;
+ pipapo_drop(m, rulemap);
+ return;
+ }
}
first_rule += rules_f0;
}
+
+ WARN_ON_ONCE(1); /* elem_priv not found */
}
/**
@@ -2115,13 +2119,15 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_iter *iter)
{
struct nft_pipapo *priv = nft_set_priv(set);
- struct net *net = read_pnet(&set->net);
const struct nft_pipapo_match *m;
const struct nft_pipapo_field *f;
unsigned int i, r;
+ WARN_ON_ONCE(iter->type != NFT_ITER_READ &&
+ iter->type != NFT_ITER_UPDATE);
+
rcu_read_lock();
- if (iter->genmask == nft_genmask_cur(net))
+ if (iter->type == NFT_ITER_READ)
m = rcu_dereference(priv->match);
else
m = priv->clone;
@@ -2143,9 +2149,6 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
e = f->mt[r].e;
- if (!nft_set_elem_active(&e->ext, iter->genmask))
- goto cont;
-
iter->err = iter->fn(ctx, set, iter, &e->priv);
if (iter->err < 0)
goto out;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 9944fe479e53..b7ea21327549 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -532,7 +532,7 @@ static void nft_rbtree_activate(const struct net *net,
{
struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);
- nft_set_elem_change_active(net, set, &rbe->ext);
+ nft_clear(net, &rbe->ext);
}
static void nft_rbtree_flush(const struct net *net,
@@ -600,8 +600,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&rbe->ext, iter->genmask))
- goto cont;
iter->err = iter->fn(ctx, set, iter, &rbe->priv);
if (iter->err < 0) {
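
The set-backend hunks above (bitmap, hash, pipapo, rbtree) all drop the per-element genmask test from their walk functions; pipapo additionally walks the committed match for NFT_ITER_READ and the uncommitted clone for NFT_ITER_UPDATE. Generation filtering now lives in the iterator callbacks, which know which generation they care about (see the nft_set_elem_active() checks added in nf_tables_api.c above). A miniature of that split, with hypothetical types:

/* Walker visits everything; the callback filters by generation. */
struct elem { unsigned int genmask; };

struct iter {
	unsigned int genmask;
	int (*fn)(const struct iter *iter, struct elem *e);
};

static int walk_all(struct elem *elems, int n, const struct iter *iter)
{
	for (int i = 0; i < n; i++) {
		int err = iter->fn(iter, &elems[i]);

		if (err < 0)
			return err;
	}
	return 0;
}

static int update_cb(const struct iter *iter, struct elem *e)
{
	if (!(e->genmask & iter->genmask))
		return 0;	/* not active in this generation: skip */
	/* ... deactivate/activate the element here ... */
	return 0;
}
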
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 819157bbb5a2..d5344563e525 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -252,10 +252,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt),
+ optval, optlen);
+ if (err)
break;
- }
if (opt > LLCP_MAX_RW) {
err = -EINVAL;
@@ -274,10 +274,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt),
+ optval, optlen);
+ if (err)
break;
- }
if (opt > LLCP_MAX_MIUX) {
err = -EINVAL;
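
copy_safe_from_sockptr() differs from the open-coded copy_from_sockptr(&opt, optval, sizeof(u32)) it replaces in one way that matters here: it checks the user-supplied optlen against the destination size, so a setsockopt() call with a short buffer fails with -EINVAL instead of over-reading user memory. A userspace sketch of that contract; the helper name is hypothetical, modelled on the kernel one:

#include <errno.h>
#include <string.h>

/* Refuse short buffers instead of reading past them. */
static int copy_opt_checked(void *dst, size_t dst_len,
			    const void *optval, size_t optlen)
{
	if (optlen < dst_len)
		return -EINVAL;	/* caller passed too little data */
	memcpy(dst, optval, dst_len);
	return 0;
}
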
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index cdad47b140fa..0d26c8ec9993 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1516,6 +1516,11 @@ static void nci_rx_work(struct work_struct *work)
nfc_send_to_raw_sock(ndev->nfc_dev, skb,
RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
+ if (!nci_plen(skb->data)) {
+ kfree_skb(skb);
+ break;
+ }
+
/* Process frame */
switch (nci_mt(skb->data)) {
case NCI_MT_RSP_PKT:
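
The added check drops frames whose NCI payload-length field is zero before the message-type dispatch runs, so the parsers below never touch a payload that is not there. The general shape, with illustrative types rather than the NCI structures:

#include <stdint.h>
#include <stdbool.h>

struct frame { uint8_t mt; uint8_t plen; };

/* Validate the length field before dispatching on the header. */
static bool frame_parseable(const struct frame *f)
{
	return f->plen != 0;	/* zero-length payload: drop, don't parse */
}
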
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 3019a4406ca4..2928c142a2dd 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1380,8 +1380,9 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
if (ct_info.timeout[0]) {
if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
ct_info.timeout))
- pr_info_ratelimited("Failed to associated timeout "
- "policy `%s'\n", ct_info.timeout);
+ OVS_NLERR(log,
+ "Failed to associated timeout policy '%s'",
+ ct_info.timeout);
else
ct_info.nf_ct_timeout = rcu_dereference(
nf_ct_timeout_find(ct_info.ct)->timeout);
@@ -1592,9 +1593,9 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
struct hlist_head *head = &info->limits[i];
struct ovs_ct_limit *ct_limit;
+ struct hlist_node *next;
- hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
- lockdep_ovsl_is_held())
+ hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
kfree_rcu(ct_limit, rcu);
}
kfree(info->limits);
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index a4e3c5de998b..00dbcd4d28e6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -302,7 +302,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
}
ret = PTR_ERR(trans_private);
/* Trigger connection so that its ready for the next retry */
- if (ret == -ENODEV)
+ if (ret == -ENODEV && cp)
rds_conn_connect_if_down(cp->cp_conn);
goto out;
}
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 39945b139c48..cd0accaf844a 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -241,13 +241,13 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
struct tcf_skbmod *d = to_skbmod(a);
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbmod_params *p;
- struct tc_skbmod opt = {
- .index = d->tcf_index,
- .refcnt = refcount_read(&d->tcf_refcnt) - ref,
- .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
- };
+ struct tc_skbmod opt;
struct tcf_t t;
+ memset(&opt, 0, sizeof(opt));
+ opt.index = d->tcf_index;
+ opt.refcnt = refcount_read(&d->tcf_refcnt) - ref;
+ opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
spin_lock_bh(&d->tcf_lock);
opt.action = d->tcf_action;
p = rcu_dereference_protected(d->skbmod_p,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 65e05b0c98e4..60239378d43f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -809,7 +809,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
!qdisc_is_offloaded);
/* TODO: perform the search on a per txq basis */
- sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
+ sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
if (sch == NULL) {
WARN_ON_ONCE(parentid != TC_H_ROOT);
break;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ff5336493777..4a2c763e2d11 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -974,6 +974,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
+ sch->owner = -1;
netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
refcount_set(&sch->refcnt, 1);
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index b2c1b683a88e..d2b02710ab07 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -921,8 +921,6 @@ out_err:
* Caller provides the truncation length of the output token (h) in
* cksumout.len.
*
- * Note that for RPCSEC, the "initial cipher state" is always all zeroes.
- *
* Return values:
* %GSS_S_COMPLETE: Digest computed, @cksumout filled in
* %GSS_S_FAILURE: Call failed
@@ -933,19 +931,22 @@ u32 krb5_etm_checksum(struct crypto_sync_skcipher *cipher,
int body_offset, struct xdr_netobj *cksumout)
{
unsigned int ivsize = crypto_sync_skcipher_ivsize(cipher);
- static const u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct ahash_request *req;
struct scatterlist sg[1];
+ u8 *iv, *checksumdata;
int err = -ENOMEM;
- u8 *checksumdata;
checksumdata = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
if (!checksumdata)
return GSS_S_FAILURE;
+ /* For RPCSEC, the "initial cipher state" is always all zeroes. */
+ iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!iv)
+ goto out_free_mem;
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req)
- goto out_free_cksumdata;
+ goto out_free_mem;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
err = crypto_ahash_init(req);
if (err)
@@ -969,7 +970,8 @@ u32 krb5_etm_checksum(struct crypto_sync_skcipher *cipher,
out_free_ahash:
ahash_request_free(req);
-out_free_cksumdata:
+out_free_mem:
+ kfree(iv);
kfree_sensitive(checksumdata);
return err ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 545017a3daa4..6b3f01beb294 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1206,15 +1206,6 @@ err_noclose:
* MSG_SPLICE_PAGES is used exclusively to reduce the number of
* copy operations in this path. Therefore the caller must ensure
* that the pages backing @xdr are unchanging.
- *
- * Note that the send is non-blocking. The caller has incremented
- * the reference count on each page backing the RPC message, and
- * the network layer will "put" these pages when transmission is
- * complete.
- *
- * This is safe for our RPC services because the memory backing
- * the head and tail components is never kmalloc'd. These always
- * come from pages in the svc_rqst::rq_pages array.
*/
static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
rpc_fraghdr marker, unsigned int *sentp)
@@ -1244,6 +1235,7 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
1 + count, sizeof(marker) + rqstp->rq_res.len);
ret = sock_sendmsg(svsk->sk_sock, &msg);
+ page_frag_free(buf);
if (ret < 0)
return ret;
*sentp += ret;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index f2a100c4c81f..40797114d50a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -231,28 +231,6 @@ static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
}
/**
- * svc_rdma_write_chunk_release - Release Write chunk I/O resources
- * @rdma: controlling transport
- * @ctxt: Send context that is being released
- */
-void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt)
-{
- struct svc_rdma_write_info *info;
- struct svc_rdma_chunk_ctxt *cc;
-
- while (!list_empty(&ctxt->sc_write_info_list)) {
- info = list_first_entry(&ctxt->sc_write_info_list,
- struct svc_rdma_write_info, wi_list);
- list_del(&info->wi_list);
-
- cc = &info->wi_cc;
- svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
- svc_rdma_write_info_free(info);
- }
-}
-
-/**
* svc_rdma_reply_chunk_release - Release Reply chunk I/O resources
* @rdma: controlling transport
* @ctxt: Send context that is being released
@@ -308,11 +286,13 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
struct ib_cqe *cqe = wc->wr_cqe;
struct svc_rdma_chunk_ctxt *cc =
container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
+ struct svc_rdma_write_info *info =
+ container_of(cc, struct svc_rdma_write_info, wi_cc);
switch (wc->status) {
case IB_WC_SUCCESS:
trace_svcrdma_wc_write(&cc->cc_cid);
- return;
+ break;
case IB_WC_WR_FLUSH_ERR:
trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
break;
@@ -320,11 +300,12 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
trace_svcrdma_wc_write_err(wc, &cc->cc_cid);
}
- /* The RDMA Write has flushed, so the client won't get
- * some of the outgoing RPC message. Signal the loss
- * to the client by closing the connection.
- */
- svc_xprt_deferred_close(&rdma->sc_xprt);
+ svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ svc_xprt_deferred_close(&rdma->sc_xprt);
+
+ svc_rdma_write_info_free(info);
}
/**
@@ -620,19 +601,13 @@ static int svc_rdma_xb_write(const struct xdr_buf *xdr, void *data)
return xdr->len;
}
-/* Link Write WRs for @chunk onto @sctxt's WR chain.
- */
-static int svc_rdma_prepare_write_chunk(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *sctxt,
- const struct svc_rdma_chunk *chunk,
- const struct xdr_buf *xdr)
+static int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_chunk *chunk,
+ const struct xdr_buf *xdr)
{
struct svc_rdma_write_info *info;
struct svc_rdma_chunk_ctxt *cc;
- struct ib_send_wr *first_wr;
struct xdr_buf payload;
- struct list_head *pos;
- struct ib_cqe *cqe;
int ret;
if (xdr_buf_subsegment(xdr, &payload, chunk->ch_position,
@@ -648,25 +623,10 @@ static int svc_rdma_prepare_write_chunk(struct svcxprt_rdma *rdma,
if (ret != payload.len)
goto out_err;
- ret = -EINVAL;
- if (unlikely(cc->cc_sqecount > rdma->sc_sq_depth))
- goto out_err;
-
- first_wr = sctxt->sc_wr_chain;
- cqe = &cc->cc_cqe;
- list_for_each(pos, &cc->cc_rwctxts) {
- struct svc_rdma_rw_ctxt *rwc;
-
- rwc = list_entry(pos, struct svc_rdma_rw_ctxt, rw_list);
- first_wr = rdma_rw_ctx_wrs(&rwc->rw_ctx, rdma->sc_qp,
- rdma->sc_port_num, cqe, first_wr);
- cqe = NULL;
- }
- sctxt->sc_wr_chain = first_wr;
- sctxt->sc_sqecount += cc->cc_sqecount;
- list_add(&info->wi_list, &sctxt->sc_write_info_list);
-
trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
+ ret = svc_rdma_post_chunk_ctxt(rdma, cc);
+ if (ret < 0)
+ goto out_err;
return 0;
out_err:
@@ -675,27 +635,25 @@ out_err:
}
/**
- * svc_rdma_prepare_write_list - Construct WR chain for sending Write list
+ * svc_rdma_send_write_list - Send all chunks on the Write list
* @rdma: controlling RDMA transport
- * @write_pcl: Write list provisioned by the client
- * @sctxt: Send WR resources
+ * @rctxt: Write list provisioned by the client
* @xdr: xdr_buf containing an RPC Reply message
*
* Returns zero on success, or a negative errno if one or more
* Write chunks could not be sent.
*/
-int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma,
- const struct svc_rdma_pcl *write_pcl,
- struct svc_rdma_send_ctxt *sctxt,
- const struct xdr_buf *xdr)
+int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ const struct xdr_buf *xdr)
{
struct svc_rdma_chunk *chunk;
int ret;
- pcl_for_each_chunk(chunk, write_pcl) {
+ pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
if (!chunk->ch_payload_length)
break;
- ret = svc_rdma_prepare_write_chunk(rdma, sctxt, chunk, xdr);
+ ret = svc_rdma_send_write_chunk(rdma, chunk, xdr);
if (ret < 0)
return ret;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index dfca39abd16c..bb5436b719e0 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -142,7 +142,6 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
ctxt->sc_cqe.done = svc_rdma_wc_send;
- INIT_LIST_HEAD(&ctxt->sc_write_info_list);
ctxt->sc_xprt_buf = buffer;
xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
rdma->sc_max_req_size);
@@ -228,7 +227,6 @@ static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
struct ib_device *device = rdma->sc_cm_id->device;
unsigned int i;
- svc_rdma_write_chunk_release(rdma, ctxt);
svc_rdma_reply_chunk_release(rdma, ctxt);
if (ctxt->sc_page_count)
@@ -1015,8 +1013,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
if (!p)
goto put_ctxt;
- ret = svc_rdma_prepare_write_list(rdma, &rctxt->rc_write_pcl, sctxt,
- &rqstp->rq_res);
+ ret = svc_rdma_send_write_list(rdma, rctxt, &rqstp->rq_res);
if (ret < 0)
goto put_ctxt;
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 762f424ff2d5..e5e47452308a 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -215,7 +215,7 @@ static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)
static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
{
- return ctx->strp.msg_ready;
+ return READ_ONCE(ctx->strp.msg_ready);
}
static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index ca1e0e198ceb..5df08d848b5c 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -360,7 +360,7 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
if (strp->stm.full_len && strp->stm.full_len == skb->len) {
desc->count = 0;
- strp->msg_ready = 1;
+ WRITE_ONCE(strp->msg_ready, 1);
tls_rx_msg_ready(strp);
}
@@ -528,7 +528,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
if (!tls_strp_check_queue_ok(strp))
return tls_strp_read_copy(strp, false);
- strp->msg_ready = 1;
+ WRITE_ONCE(strp->msg_ready, 1);
tls_rx_msg_ready(strp);
return 0;
@@ -580,7 +580,7 @@ void tls_strp_msg_done(struct tls_strparser *strp)
else
tls_strp_flush_anchor_copy(strp);
- strp->msg_ready = 0;
+ WRITE_ONCE(strp->msg_ready, 0);
memset(&strp->stm, 0, sizeof(strp->stm));
tls_strp_check_rcv(strp);
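
strp.msg_ready is written by the strparser in its own context but read locklessly (for example from the poll path via tls_strp_msg_ready()), so every access is now marked: READ_ONCE()/WRITE_ONCE() tell the compiler it may not tear, fuse, or re-load the plain access. A rough userspace analogue using C11 relaxed atomics, which approximate the kernel macros rather than implement them:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool msg_ready;

/* Writer side: publish or clear readiness with a marked store. */
static void set_ready(bool v)
{
	atomic_store_explicit(&msg_ready, v, memory_order_relaxed);
}

/* Lockless reader (the poll path): marked load, no lock taken. */
static bool peek_ready(void)
{
	return atomic_load_explicit(&msg_ready, memory_order_relaxed);
}
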
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 211f57164cb6..b783231668c6 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1976,10 +1976,10 @@ int tls_sw_recvmsg(struct sock *sk,
if (unlikely(flags & MSG_ERRQUEUE))
return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
- psock = sk_psock_get(sk);
err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
if (err < 0)
return err;
+ psock = sk_psock_get(sk);
bpf_strp_enabled = sk_psock_strp_enabled(psock);
/* If crypto failed the connection is broken */
@@ -2152,12 +2152,15 @@ recv_end:
}
/* Drain records from the rx_list & copy if required */
- if (is_peek || is_kvec)
+ if (is_peek)
err = process_rx_list(ctx, msg, &control, copied + peeked,
decrypted - peeked, is_peek, NULL);
else
err = process_rx_list(ctx, msg, &control, 0,
async_copy_bytes, is_peek, NULL);
+
+ /* we could have copied less than we wanted, and possibly nothing */
+ decrypted += max(err, 0) - async_copy_bytes;
}
copied += decrypted;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5b41e2321209..9a6ad5974dff 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2663,9 +2663,13 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
WRITE_ONCE(u->oob_skb, NULL);
consume_skb(skb);
}
- } else if (!(flags & MSG_PEEK)) {
+ } else if (flags & MSG_PEEK) {
+ skb = NULL;
+ } else {
skb_unlink(skb, &sk->sk_receive_queue);
- consume_skb(skb);
+ WRITE_ONCE(u->oob_skb, NULL);
+ if (!WARN_ON_ONCE(skb_unref(skb)))
+ kfree_skb(skb);
skb = skb_peek(&sk->sk_receive_queue);
}
}
@@ -2739,18 +2743,16 @@ redo:
last = skb = skb_peek(&sk->sk_receive_queue);
last_len = last ? last->len : 0;
+again:
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (skb) {
skb = manage_oob(skb, sk, flags, copied);
- if (!skb) {
+ if (!skb && copied) {
unix_state_unlock(sk);
- if (copied)
- break;
- goto redo;
+ break;
}
}
#endif
-again:
if (skb == NULL) {
if (copied >= target)
goto unlock;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index fa39b6265238..0104be9d4704 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -274,11 +274,22 @@ static void __unix_gc(struct work_struct *work)
* receive queues. Other, non candidate sockets _can_ be
* added to queue, so we must make sure only to touch
* candidates.
+ *
+ * Embryos, though never candidates themselves, affect which
+ * candidates are reachable by the garbage collector. Before
+ * being added to a listener's queue, an embryo may already
+ * receive data carrying SCM_RIGHTS, potentially making the
+ * passed socket a candidate that is not yet reachable by the
+ * collector. It becomes reachable once the embryo is
+ * enqueued. Therefore, we must ensure that no SCM-laden
+ * embryo appears in a (candidate) listener's queue between
+ * consecutive scan_children() calls.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
+ struct sock *sk = &u->sk;
long total_refs;
- total_refs = file_count(u->sk.sk_socket->file);
+ total_refs = file_count(sk->sk_socket->file);
WARN_ON_ONCE(!u->inflight);
WARN_ON_ONCE(total_refs < u->inflight);
@@ -286,6 +297,11 @@ static void __unix_gc(struct work_struct *work)
list_move_tail(&u->link, &gc_candidates);
__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+
+ if (sk->sk_state == TCP_LISTEN) {
+ unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
+ unix_state_unlock(sk);
+ }
}
}
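
The unix_state_lock_nested()/unix_state_unlock() pair added for listeners looks like a no-op, but it acts as a barrier: once the GC has taken and released the listener lock, any concurrent connect path that was enqueueing an SCM-laden embryo under that lock has finished, which is the guarantee the new comment asks for between consecutive scan_children() calls. The idiom in miniature, as a userspace sketch:

#include <pthread.h>

/* Lock-then-unlock as a synchronization barrier: after this returns,
 * every critical section on @lock that began before the call has
 * completed. */
static void barrier_on(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	pthread_mutex_unlock(lock);
}
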
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 1748268e0694..ee5d306a96d0 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -120,7 +120,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
if (!skb)
break;
- virtio_transport_deliver_tap_pkt(skb);
reply = virtio_vsock_skb_reply(skb);
sgs = vsock->out_sgs;
sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
@@ -170,6 +169,8 @@ virtio_transport_send_pkt_work(struct work_struct *work)
break;
}
+ virtio_transport_deliver_tap_pkt(skb);
+
if (reply) {
struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
int val;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b4edba6b0b7b..30ff9a470813 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -14030,6 +14030,8 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
error:
for (i = 0; i < new_coalesce.n_rules; i++) {
tmp_rule = &new_coalesce.rules[i];
+ if (!tmp_rule)
+ continue;
for (j = 0; j < tmp_rule->n_patterns; j++)
kfree(tmp_rule->patterns[j].mask);
kfree(tmp_rule->patterns);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index e039e66ab377..df013c98b80d 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1024,7 +1024,7 @@ TRACE_EVENT(rdev_get_mpp,
TRACE_EVENT(rdev_dump_mpp,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx,
u8 *dst, u8 *mpp),
- TP_ARGS(wiphy, netdev, _idx, mpp, dst),
+ TP_ARGS(wiphy, netdev, _idx, dst, mpp),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
@@ -1758,7 +1758,7 @@ TRACE_EVENT(rdev_return_void_tx_rx,
DECLARE_EVENT_CLASS(tx_rx_evt,
TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
- TP_ARGS(wiphy, rx, tx),
+ TP_ARGS(wiphy, tx, rx),
TP_STRUCT__entry(
WIPHY_ENTRY
__field(u32, tx)
@@ -1775,7 +1775,7 @@ DECLARE_EVENT_CLASS(tx_rx_evt,
DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
- TP_ARGS(wiphy, rx, tx)
+ TP_ARGS(wiphy, tx, rx)
);
DECLARE_EVENT_CLASS(wiphy_netdev_id_evt,
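
All three fixes above are pure argument-order bugs: TP_ARGS passed (rx, tx) into a prototype declared (tx, rx), and because both parameters share a type the compiler cannot object; the trace output silently swaps the counters. A runnable illustration of the failure mode:

#include <stdio.h>

static void record(unsigned int tx, unsigned int rx)
{
	printf("tx=%u rx=%u\n", tx, rx);
}

int main(void)
{
	unsigned int tx = 100, rx = 7;

	record(rx, tx);	/* swapped call site: logs tx=7 rx=100 */
	record(tx, rx);	/* fixed: logs tx=100 rx=7 */
	return 0;
}
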
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index a161c64d1765..838ad6541a17 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -4,6 +4,7 @@
* Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
* Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved.
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2024 Intel Corporation
*
* (As all part of the Linux kernel, this file is GPL)
*/
@@ -662,7 +663,8 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
dev->ieee80211_ptr->wiphy->wext &&
dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) {
wireless_warn_cfg80211_wext();
- if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
+ if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO |
+ WIPHY_FLAG_DISABLE_WEXT))
return NULL;
return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev);
}
@@ -704,7 +706,8 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
#ifdef CONFIG_CFG80211_WEXT
if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) {
wireless_warn_cfg80211_wext();
- if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
+ if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO |
+ WIPHY_FLAG_DISABLE_WEXT))
return NULL;
handlers = dev->ieee80211_ptr->wiphy->wext;
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3404d076a8a3..727aa20be4bd 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1417,6 +1417,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
struct xsk_queue **q;
int entries;
+ if (optlen < sizeof(entries))
+ return -EINVAL;
if (copy_from_sockptr(&entries, optval, sizeof(entries)))
return -EFAULT;
diff --git a/rust/Makefile b/rust/Makefile
index 846e6ab9d5a9..86a125c4243c 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -175,7 +175,6 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) --test $(rust_flags) \
- @$(objtree)/include/generated/rustc_cfg \
-L$(objtree)/$(obj) --extern alloc --extern kernel \
--extern build_error --extern macros \
--extern bindings --extern uapi \
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 424257284d16..09004b56fb65 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -1292,8 +1292,15 @@ impl_zeroable! {
i8, i16, i32, i64, i128, isize,
f32, f64,
- // SAFETY: These are ZSTs, there is nothing to zero.
- {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, Infallible, (),
+ // Note: do not add uninhabited types (such as `!` or `core::convert::Infallible`) to this list;
+ // creating an instance of an uninhabited type is immediate undefined behavior. For more on
+ // uninhabited/empty types, consult The Rustonomicon:
+ // <https://doc.rust-lang.org/stable/nomicon/exotic-sizes.html#empty-types>. The Rust Reference
+ // also has information on undefined behavior:
+ // <https://doc.rust-lang.org/stable/reference/behavior-considered-undefined.html>.
+ //
+ // SAFETY: These are inhabited ZSTs; there is nothing to zero and a valid value exists.
+ {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, (),
// SAFETY: Type is allowed to take any value, including all zeros.
{<T>} MaybeUninit<T>,
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index be68d5e567b1..6858e2f8a3ed 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -65,7 +65,7 @@ const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
/// The top level entrypoint to implementing a kernel module.
///
/// For any teardown or cleanup operations, your type may implement [`Drop`].
-pub trait Module: Sized + Sync {
+pub trait Module: Sized + Sync + Send {
/// Called at module initialization time.
///
/// Use this method to perform whatever setup or registration your module
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index 96e09c6e8530..265d0e1c1371 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -640,6 +640,10 @@ pub struct Registration {
drivers: Pin<&'static mut [DriverVTable]>,
}
+// SAFETY: The only action allowed in a `Registration` instance is dropping it, which is safe to do
+// from any thread because `phy_drivers_unregister` can be called from any thread context.
+unsafe impl Send for Registration {}
+
impl Registration {
/// Registers a PHY driver.
pub fn register(
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index f489f3157383..520eae5fd792 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -35,18 +35,6 @@ use proc_macro::TokenStream;
/// author: "Rust for Linux Contributors",
/// description: "My very own kernel module!",
/// license: "GPL",
-/// params: {
-/// my_i32: i32 {
-/// default: 42,
-/// permissions: 0o000,
-/// description: "Example of i32",
-/// },
-/// writeable_i32: i32 {
-/// default: 42,
-/// permissions: 0o644,
-/// description: "Example of i32",
-/// },
-/// },
/// }
///
/// struct MyModule;
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 27979e582e4b..acd0393b5095 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -199,17 +199,6 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
/// Used by the printing macros, e.g. [`info!`].
const __LOG_PREFIX: &[u8] = b\"{name}\\0\";
- /// The \"Rust loadable module\" mark.
- //
- // This may be best done another way later on, e.g. as a new modinfo
- // key or a new section. For the moment, keep it simple.
- #[cfg(MODULE)]
- #[doc(hidden)]
- #[used]
- static __IS_RUST_MODULE: () = ();
-
- static mut __MOD: Option<{type_}> = None;
-
// SAFETY: `__this_module` is constructed by the kernel at load time and will not be
// freed until the module is unloaded.
#[cfg(MODULE)]
@@ -221,81 +210,132 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
kernel::ThisModule::from_ptr(core::ptr::null_mut())
}};
- // Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
- /// # Safety
- ///
- /// This function must not be called after module initialization, because it may be
- /// freed after that completes.
- #[cfg(MODULE)]
- #[doc(hidden)]
- #[no_mangle]
- #[link_section = \".init.text\"]
- pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{
- __init()
- }}
-
- #[cfg(MODULE)]
- #[doc(hidden)]
- #[no_mangle]
- pub extern \"C\" fn cleanup_module() {{
- __exit()
- }}
+ // Double nested modules, since then nobody can access the public items inside.
+ mod __module_init {{
+ mod __module_init {{
+ use super::super::{type_};
+
+ /// The \"Rust loadable module\" mark.
+ //
+ // This may be best done another way later on, e.g. as a new modinfo
+ // key or a new section. For the moment, keep it simple.
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[used]
+ static __IS_RUST_MODULE: () = ();
+
+ static mut __MOD: Option<{type_}> = None;
+
+ // Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
+ /// # Safety
+ ///
+ /// This function must not be called after module initialization, because it may be
+ /// freed after that completes.
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[no_mangle]
+ #[link_section = \".init.text\"]
+ pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{
+ // SAFETY: This function is inaccessible to the outside due to the double
+ // module wrapping it. It is called exactly once by the C side via its
+ // unique name.
+ unsafe {{ __init() }}
+ }}
- // Built-in modules are initialized through an initcall pointer
- // and the identifiers need to be unique.
- #[cfg(not(MODULE))]
- #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
- #[doc(hidden)]
- #[link_section = \"{initcall_section}\"]
- #[used]
- pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn cleanup_module() {{
+ // SAFETY:
+ // - This function is inaccessible to the outside due to the double
+ // module wrapping it. It is called exactly once by the C side via its
+ // unique name,
+ // - furthermore it is only called after `init_module` has returned `0`
+ // (which delegates to `__init`).
+ unsafe {{ __exit() }}
+ }}
- #[cfg(not(MODULE))]
- #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
- core::arch::global_asm!(
- r#\".section \"{initcall_section}\", \"a\"
- __{name}_initcall:
- .long __{name}_init - .
- .previous
- \"#
- );
+ // Built-in modules are initialized through an initcall pointer
+ // and the identifiers need to be unique.
+ #[cfg(not(MODULE))]
+ #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
+ #[doc(hidden)]
+ #[link_section = \"{initcall_section}\"]
+ #[used]
+ pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
+
+ #[cfg(not(MODULE))]
+ #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
+ core::arch::global_asm!(
+ r#\".section \"{initcall_section}\", \"a\"
+ __{name}_initcall:
+ .long __{name}_init - .
+ .previous
+ \"#
+ );
+
+ #[cfg(not(MODULE))]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
+ // SAFETY: This function is inaccessible to the outside due to the double
+ // module wrapping it. It is called exactly once by the C side via its
+ // placement above in the initcall section.
+ unsafe {{ __init() }}
+ }}
- #[cfg(not(MODULE))]
- #[doc(hidden)]
- #[no_mangle]
- pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
- __init()
- }}
+ #[cfg(not(MODULE))]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn __{name}_exit() {{
+ // SAFETY:
+ // - This function is inaccessible to the outside due to the double
+ // module wrapping it. It is called exactly once by the C side via its
+ // unique name,
+ // - furthermore it is only called after `__{name}_init` has returned `0`
+ // (which delegates to `__init`).
+ unsafe {{ __exit() }}
+ }}
- #[cfg(not(MODULE))]
- #[doc(hidden)]
- #[no_mangle]
- pub extern \"C\" fn __{name}_exit() {{
- __exit()
- }}
+ /// # Safety
+ ///
+ /// This function must only be called once.
+ unsafe fn __init() -> core::ffi::c_int {{
+ match <{type_} as kernel::Module>::init(&super::super::THIS_MODULE) {{
+ Ok(m) => {{
+ // SAFETY: No data race, since `__MOD` can only be accessed by this
+ // module and there only `__init` and `__exit` access it. These
+ // functions are only called once and `__exit` cannot be called
+ // before or during `__init`.
+ unsafe {{
+ __MOD = Some(m);
+ }}
+ return 0;
+ }}
+ Err(e) => {{
+ return e.to_errno();
+ }}
+ }}
+ }}
- fn __init() -> core::ffi::c_int {{
- match <{type_} as kernel::Module>::init(&THIS_MODULE) {{
- Ok(m) => {{
+ /// # Safety
+ ///
+ /// This function must
+ /// - only be called once,
+ /// - be called after `__init` has been called and returned `0`.
+ unsafe fn __exit() {{
+ // SAFETY: No data race, since `__MOD` can only be accessed by this module
+ // and there only `__init` and `__exit` access it. These functions are only
+ // called once and `__init` was already called.
unsafe {{
- __MOD = Some(m);
+ // Invokes `drop()` on `__MOD`, which should be used for cleanup.
+ __MOD = None;
}}
- return 0;
- }}
- Err(e) => {{
- return e.to_errno();
}}
- }}
- }}
- fn __exit() {{
- unsafe {{
- // Invokes `drop()` on `__MOD`, which should be used for cleanup.
- __MOD = None;
+ {modinfo}
}}
}}
-
- {modinfo}
",
type_ = info.type_,
name = info.name,
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index baf86c0880b6..533a7799fdfe 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -273,7 +273,7 @@ rust_common_cmd = \
-Zallow-features=$(rust_allowed_features) \
-Zcrate-attr=no_std \
-Zcrate-attr='feature($(rust_allowed_features))' \
- --extern alloc --extern kernel \
+ -Zunstable-options --extern force:alloc --extern kernel \
--crate-type rlib -L $(objtree)/rust/ \
--crate-name $(basename $(notdir $@)) \
--sysroot=/dev/null \
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 3ce5d503a6da..c5af566e911a 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -114,6 +114,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+KBUILD_CFLAGS += -Wno-override-init # alias for -Wno-initializer-overrides in clang
+
ifdef CONFIG_CC_IS_CLANG
# Clang before clang-16 would warn on default argument promotions.
ifneq ($(call clang-min-version, 160000),y)
@@ -151,10 +153,6 @@ KBUILD_CFLAGS += -Wtype-limits
KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized)
KBUILD_CFLAGS += $(call cc-option, -Wunused-macros)
-ifdef CONFIG_CC_IS_CLANG
-KBUILD_CFLAGS += -Winitializer-overrides
-endif
-
KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN2
else
@@ -164,9 +162,7 @@ KBUILD_CFLAGS += -Wno-missing-field-initializers
KBUILD_CFLAGS += -Wno-type-limits
KBUILD_CFLAGS += -Wno-shift-negative-value
-ifdef CONFIG_CC_IS_CLANG
-KBUILD_CFLAGS += -Wno-initializer-overrides
-else
+ifdef CONFIG_CC_IS_GCC
KBUILD_CFLAGS += -Wno-maybe-uninitialized
endif
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index 8568d256d6fb..79fcf2731686 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -23,7 +23,7 @@ modname = $(notdir $(@:.mod.o=))
part-of-module = y
quiet_cmd_cc_o_c = CC [M] $@
- cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV), $(c_flags)) -c -o $@ $<
+ cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV) $(CFLAGS_KCSAN), $(c_flags)) -c -o $@ $<
%.mod.o: %.mod.c FORCE
$(call if_changed_dep,cc_o_c)
diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
index 4606944984ee..c55878bddfdd 100755
--- a/scripts/bpf_doc.py
+++ b/scripts/bpf_doc.py
@@ -414,8 +414,8 @@ class PrinterRST(Printer):
version = version.stdout.decode().rstrip()
except:
try:
- version = subprocess.run(['make', 'kernelversion'], cwd=linuxRoot,
- capture_output=True, check=True)
+ version = subprocess.run(['make', '-s', '--no-print-directory', 'kernelversion'],
+ cwd=linuxRoot, capture_output=True, check=True)
version = version.stdout.decode().rstrip()
except:
return 'Linux'
diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
index c5c2ce113c92..d20c47d21ad8 100644
--- a/scripts/gcc-plugins/stackleak_plugin.c
+++ b/scripts/gcc-plugins/stackleak_plugin.c
@@ -467,6 +467,8 @@ static bool stackleak_gate(void)
return false;
if (STRING_EQUAL(section, ".entry.text"))
return false;
+ if (STRING_EQUAL(section, ".head.text"))
+ return false;
}
return track_frame_size >= 0;
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index b5730061872b..965bb40c50e5 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -552,11 +552,6 @@ static int conf_choice(struct menu *menu)
continue;
}
sym_set_tristate_value(child->sym, yes);
- for (child = child->list; child; child = child->next) {
- indent += 2;
- conf(child);
- indent -= 2;
- }
return 1;
}
}
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index e69d7c59d930..e7cc9e985c4f 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -89,7 +89,7 @@ void menu_add_visibility(struct expr *dep);
struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
-void menu_finalize(struct menu *parent);
+void menu_finalize(void);
void menu_set_type(int type);
extern struct menu rootmenu;
diff --git a/scripts/kconfig/lxdialog/checklist.c b/scripts/kconfig/lxdialog/checklist.c
index 31d0a89fbeb7..75493302fb85 100644
--- a/scripts/kconfig/lxdialog/checklist.c
+++ b/scripts/kconfig/lxdialog/checklist.c
@@ -119,7 +119,7 @@ int dialog_checklist(const char *title, const char *prompt, int height,
}
do_resize:
- if (getmaxy(stdscr) < (height + CHECKLIST_HEIGTH_MIN))
+ if (getmaxy(stdscr) < (height + CHECKLIST_HEIGHT_MIN))
return -ERRDISPLAYTOOSMALL;
if (getmaxx(stdscr) < (width + CHECKLIST_WIDTH_MIN))
return -ERRDISPLAYTOOSMALL;
diff --git a/scripts/kconfig/lxdialog/dialog.h b/scripts/kconfig/lxdialog/dialog.h
index 2d15ba893fbf..f6c2ebe6d1f9 100644
--- a/scripts/kconfig/lxdialog/dialog.h
+++ b/scripts/kconfig/lxdialog/dialog.h
@@ -162,17 +162,17 @@ int on_key_esc(WINDOW *win);
int on_key_resize(void);
/* minimum (re)size values */
-#define CHECKLIST_HEIGTH_MIN 6 /* For dialog_checklist() */
+#define CHECKLIST_HEIGHT_MIN 6 /* For dialog_checklist() */
#define CHECKLIST_WIDTH_MIN 6
-#define INPUTBOX_HEIGTH_MIN 2 /* For dialog_inputbox() */
+#define INPUTBOX_HEIGHT_MIN 2 /* For dialog_inputbox() */
#define INPUTBOX_WIDTH_MIN 2
-#define MENUBOX_HEIGTH_MIN 15 /* For dialog_menu() */
+#define MENUBOX_HEIGHT_MIN 15 /* For dialog_menu() */
#define MENUBOX_WIDTH_MIN 65
-#define TEXTBOX_HEIGTH_MIN 8 /* For dialog_textbox() */
+#define TEXTBOX_HEIGHT_MIN 8 /* For dialog_textbox() */
#define TEXTBOX_WIDTH_MIN 8
-#define YESNO_HEIGTH_MIN 4 /* For dialog_yesno() */
+#define YESNO_HEIGHT_MIN 4 /* For dialog_yesno() */
#define YESNO_WIDTH_MIN 4
-#define WINDOW_HEIGTH_MIN 19 /* For init_dialog() */
+#define WINDOW_HEIGHT_MIN 19 /* For init_dialog() */
#define WINDOW_WIDTH_MIN 80
int init_dialog(const char *backtitle);
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index 1dcfb288ee63..3c6e24b20f5b 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -43,7 +43,7 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width
strcpy(instr, init);
do_resize:
- if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN))
+ if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGHT_MIN))
return -ERRDISPLAYTOOSMALL;
if (getmaxx(stdscr) <= (width - INPUTBOX_WIDTH_MIN))
return -ERRDISPLAYTOOSMALL;
diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c
index 0e333284e947..6e6244df0c56 100644
--- a/scripts/kconfig/lxdialog/menubox.c
+++ b/scripts/kconfig/lxdialog/menubox.c
@@ -172,7 +172,7 @@ int dialog_menu(const char *title, const char *prompt,
do_resize:
height = getmaxy(stdscr);
width = getmaxx(stdscr);
- if (height < MENUBOX_HEIGTH_MIN || width < MENUBOX_WIDTH_MIN)
+ if (height < MENUBOX_HEIGHT_MIN || width < MENUBOX_WIDTH_MIN)
return -ERRDISPLAYTOOSMALL;
height -= 4;
diff --git a/scripts/kconfig/lxdialog/textbox.c b/scripts/kconfig/lxdialog/textbox.c
index 058ed0e5bbd5..0abaf635978f 100644
--- a/scripts/kconfig/lxdialog/textbox.c
+++ b/scripts/kconfig/lxdialog/textbox.c
@@ -175,7 +175,7 @@ int dialog_textbox(const char *title, const char *tbuf, int initial_height,
do_resize:
getmaxyx(stdscr, height, width);
- if (height < TEXTBOX_HEIGTH_MIN || width < TEXTBOX_WIDTH_MIN)
+ if (height < TEXTBOX_HEIGHT_MIN || width < TEXTBOX_WIDTH_MIN)
return -ERRDISPLAYTOOSMALL;
if (initial_height != 0)
height = initial_height;
diff --git a/scripts/kconfig/lxdialog/util.c b/scripts/kconfig/lxdialog/util.c
index 3fb7508b68a2..f18e2a89f613 100644
--- a/scripts/kconfig/lxdialog/util.c
+++ b/scripts/kconfig/lxdialog/util.c
@@ -291,7 +291,7 @@ int init_dialog(const char *backtitle)
getyx(stdscr, saved_y, saved_x);
getmaxyx(stdscr, height, width);
- if (height < WINDOW_HEIGTH_MIN || width < WINDOW_WIDTH_MIN) {
+ if (height < WINDOW_HEIGHT_MIN || width < WINDOW_WIDTH_MIN) {
endwin();
return -ERRDISPLAYTOOSMALL;
}
diff --git a/scripts/kconfig/lxdialog/yesno.c b/scripts/kconfig/lxdialog/yesno.c
index bcaac9b7bab2..b57d25e1549f 100644
--- a/scripts/kconfig/lxdialog/yesno.c
+++ b/scripts/kconfig/lxdialog/yesno.c
@@ -32,7 +32,7 @@ int dialog_yesno(const char *title, const char *prompt, int height, int width)
WINDOW *dialog;
do_resize:
- if (getmaxy(stdscr) < (height + YESNO_HEIGTH_MIN))
+ if (getmaxy(stdscr) < (height + YESNO_HEIGHT_MIN))
return -ERRDISPLAYTOOSMALL;
if (getmaxx(stdscr) < (width + YESNO_WIDTH_MIN))
return -ERRDISPLAYTOOSMALL;
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index f4bb391d50cf..c0969097447d 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -659,9 +659,9 @@ static void conf_choice(struct menu *menu)
dialog_clear();
res = dialog_checklist(prompt ? prompt : "Main Menu",
radiolist_instructions,
- MENUBOX_HEIGTH_MIN,
+ MENUBOX_HEIGHT_MIN,
MENUBOX_WIDTH_MIN,
- CHECKLIST_HEIGTH_MIN);
+ CHECKLIST_HEIGHT_MIN);
selected = item_activate_selected();
switch (res) {
case 0:
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 8498481e6afe..3b822cd110f4 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -282,7 +282,7 @@ static void sym_check_prop(struct symbol *sym)
}
}
-void menu_finalize(struct menu *parent)
+static void _menu_finalize(struct menu *parent, bool inside_choice)
{
struct menu *menu, *last_menu;
struct symbol *sym;
@@ -296,7 +296,12 @@ void menu_finalize(struct menu *parent)
* and propagate parent dependencies before moving on.
*/
- if (sym && sym_is_choice(sym)) {
+ bool is_choice = false;
+
+ if (sym && sym_is_choice(sym))
+ is_choice = true;
+
+ if (is_choice) {
if (sym->type == S_UNKNOWN) {
/* find the first choice value to find out choice type */
current_entry = parent;
@@ -394,7 +399,7 @@ void menu_finalize(struct menu *parent)
}
}
- if (sym && sym_is_choice(sym))
+ if (is_choice)
expr_free(parentdep);
/*
@@ -402,8 +407,8 @@ void menu_finalize(struct menu *parent)
* moving on
*/
for (menu = parent->list; menu; menu = menu->next)
- menu_finalize(menu);
- } else if (sym) {
+ _menu_finalize(menu, is_choice);
+ } else if (!inside_choice && sym) {
/*
* Automatic submenu creation. If sym is a symbol and A, B, C,
* ... are consecutive items (symbols, menus, ifs, etc.) that
@@ -463,7 +468,7 @@ void menu_finalize(struct menu *parent)
/* Superset, put in submenu */
expr_free(dep2);
next:
- menu_finalize(menu);
+ _menu_finalize(menu, false);
menu->parent = parent;
last_menu = menu;
}
@@ -582,6 +587,11 @@ void menu_finalize(struct menu *parent)
}
}
+void menu_finalize(void)
+{
+ _menu_finalize(&rootmenu, false);
+}
+
bool menu_has_prompt(struct menu *menu)
{
if (!menu->prompt)
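The menu.c change above threads recursion-local state (whether the walk is inside a choice) through a static helper while keeping a stable public entry point for callers such as parser.y. A minimal sketch of that wrapper pattern, with illustrative names rather than kconfig's real types:

	#include <stdbool.h>
	#include <stddef.h>

	struct node {
		struct node *list;	/* first child */
		struct node *next;	/* next sibling */
	};

	/* Recursion-local state lives in the helper's parameter list... */
	static void _finalize(struct node *parent, bool inside_choice)
	{
		struct node *child;

		(void)inside_choice;	/* consulted by the real code, unused here */
		for (child = parent->list; child; child = child->next)
			_finalize(child, false);
	}

	/* ...and the public entry point keeps the simple signature callers see. */
	void finalize(struct node *root)
	{
		_finalize(root, false);	/* the top level is never inside a choice */
	}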
diff --git a/scripts/kconfig/parser.y b/scripts/kconfig/parser.y
index b45bfaf0a02b..7fb996612c96 100644
--- a/scripts/kconfig/parser.y
+++ b/scripts/kconfig/parser.y
@@ -515,7 +515,7 @@ void conf_parse(const char *name)
menu_add_prompt(P_MENU, "Main menu", NULL);
}
- menu_finalize(&rootmenu);
+ menu_finalize();
menu = &rootmenu;
while (menu) {
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 967f1abb0edb..cb1be22afc65 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1541,7 +1541,7 @@ sub create_parameterlist($$$$) {
save_struct_actual($2);
push_parameter($2, "$type $1", $arg, $file, $declaration_name);
- } elsif ($param =~ m/(.*?):(\d+)/) {
+ } elsif ($param =~ m/(.*?):(\w+)/) {
if ($type ne "") { # skip unnamed bit-fields
save_struct_actual($1);
push_parameter($1, "$type:$2", $arg, $file, $declaration_name)
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 6b37039c9e92..2f5b91da5afa 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1007,6 +1007,8 @@ static Elf_Sym *find_fromsym(struct elf_info *elf, Elf_Addr addr,
static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym)
{
+ Elf_Sym *new_sym;
+
/* If the supplied symbol has a valid name, return it */
if (is_valid_name(elf, sym))
return sym;
@@ -1015,8 +1017,9 @@ static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym)
* Strive to find a better symbol name, but the resulting name may not
* match the symbol referenced in the original code.
*/
- return symsearch_find_nearest(elf, addr, get_secindex(elf, sym),
- true, 20);
+ new_sym = symsearch_find_nearest(elf, addr, get_secindex(elf, sym),
+ true, 20);
+ return new_sym ? new_sym : sym;
}
static bool is_executable_section(struct elf_info *elf, unsigned int secndx)
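The find_tosym() fix guards against symsearch_find_nearest() returning NULL: the caller-supplied symbol is kept whenever the bounded search finds nothing better. A self-contained sketch of that fallback pattern (find_nearest() is a stand-in, not the modpost function):

	#include <stddef.h>

	struct sym { const char *name; };

	/* Stand-in lookup that, like the real search, may fail and return NULL. */
	static const struct sym *find_nearest(unsigned long addr)
	{
		(void)addr;
		return NULL;	/* worst case: nothing within the search bound */
	}

	/* Prefer the better match, but never trade a valid symbol for NULL. */
	static const struct sym *resolve_tosym(const struct sym *sym,
					       unsigned long addr)
	{
		const struct sym *better = find_nearest(addr);

		return better ? better : sym;
	}

	int main(void)
	{
		struct sym cur = { "orig_symbol" };

		return resolve_tosym(&cur, 0x1000) == &cur ? 0 : 1;
	}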
diff --git a/security/security.c b/security/security.c
index 7e118858b545..0a9a0ac3f266 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1793,11 +1793,11 @@ int security_path_mknod(const struct path *dir, struct dentry *dentry,
EXPORT_SYMBOL(security_path_mknod);
/**
- * security_path_post_mknod() - Update inode security field after file creation
+ * security_path_post_mknod() - Update inode security after reg file creation
* @idmap: idmap of the mount
* @dentry: new file
*
- * Update inode security field after a file has been created.
+ * Update inode security field after a regular file has been created.
*/
void security_path_post_mknod(struct mnt_idmap *idmap, struct dentry *dentry)
{
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 0619a1cbbfbe..074d6c2714eb 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -2123,7 +2123,6 @@ static struct file_system_type sel_fs_type = {
.kill_sb = sel_kill_sb,
};
-static struct vfsmount *selinuxfs_mount __ro_after_init;
struct path selinux_null __ro_after_init;
static int __init init_sel_fs(void)
@@ -2145,18 +2144,21 @@ static int __init init_sel_fs(void)
return err;
}
- selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type);
- if (IS_ERR(selinuxfs_mount)) {
+ selinux_null.mnt = kern_mount(&sel_fs_type);
+ if (IS_ERR(selinux_null.mnt)) {
pr_err("selinuxfs: could not mount!\n");
- err = PTR_ERR(selinuxfs_mount);
- selinuxfs_mount = NULL;
+ err = PTR_ERR(selinux_null.mnt);
+ selinux_null.mnt = NULL;
+ return err;
}
+
selinux_null.dentry = d_hash_and_lookup(selinux_null.mnt->mnt_root,
&null_name);
if (IS_ERR(selinux_null.dentry)) {
pr_err("selinuxfs: could not lookup null!\n");
err = PTR_ERR(selinux_null.dentry);
selinux_null.dentry = NULL;
+ return err;
}
return err;
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index b8ff5cccd0c8..5431d2c49421 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -158,7 +158,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
struct device_node *child, *sound = NULL;
struct resource *r;
int i, layout = 0, rlen, ok = force;
- char node_name[6];
+ char node_name[8];
static const char *rnames[] = { "i2sbus: %pOFn (control)",
"i2sbus: %pOFn (tx)",
"i2sbus: %pOFn (rx)" };
diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
index b141024830ec..ee6ac649df83 100644
--- a/sound/core/seq/seq_ump_convert.c
+++ b/sound/core/seq/seq_ump_convert.c
@@ -428,7 +428,7 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
midi1->note.group = midi2->note.group;
midi1->note.status = midi2->note.status;
midi1->note.channel = midi2->note.channel;
- switch (midi2->note.status << 4) {
+ switch (midi2->note.status) {
case UMP_MSG_STATUS_NOTE_ON:
case UMP_MSG_STATUS_NOTE_OFF:
midi1->note.note = midi2->note.note;
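The seq_ump_convert fix drops a stray shift: note.status is a 4-bit field that already holds the status code, so comparing (status << 4) against the UMP_MSG_STATUS_* values could never match. A runnable demonstration of the off-by-a-nibble comparison (0x9 for note-on is an assumption taken from the MIDI 2.0 UMP encoding, not from this diff):

	#include <stdio.h>

	#define STATUS_NOTE_ON 0x9	/* hypothetical stand-in for UMP_MSG_STATUS_NOTE_ON */

	int main(void)
	{
		unsigned int status = STATUS_NOTE_ON;	/* as read from the 4-bit field */

		/* Old code: 0x9 << 4 == 0x90, which never equals 0x9. */
		printf("shifted comparison matches: %d\n", (status << 4) == STATUS_NOTE_ON);
		/* Fixed code compares the field directly. */
		printf("direct comparison matches:  %d\n", status == STATUS_NOTE_ON);
		return 0;
	}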
diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
index 696a958d93e9..088cff799e0b 100644
--- a/sound/hda/intel-nhlt.c
+++ b/sound/hda/intel-nhlt.c
@@ -343,3 +343,29 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
return NULL;
}
EXPORT_SYMBOL(intel_nhlt_get_endpoint_blob);
+
+int intel_nhlt_ssp_device_type(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u8 virtual_bus_id)
+{
+ struct nhlt_endpoint *epnt;
+ int i;
+
+ if (!nhlt)
+ return -EINVAL;
+
+ epnt = (struct nhlt_endpoint *)nhlt->desc;
+ for (i = 0; i < nhlt->endpoint_count; i++) {
+ /* for SSP link the virtual bus id is the SSP port number */
+ if (epnt->linktype == NHLT_LINK_SSP &&
+ epnt->virtual_bus_id == virtual_bus_id) {
+ dev_dbg(dev, "SSP%d: dev_type=%d\n", virtual_bus_id,
+ epnt->device_type);
+ return epnt->device_type;
+ }
+
+ epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(intel_nhlt_ssp_device_type);
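A hypothetical caller for the helper added above; only intel_nhlt_ssp_device_type() itself comes from this patch, while the surrounding function and the assumption that the NHLT table was obtained earlier in the driver's probe path are illustrative:

	/* Sketch, not a real driver; assumes <sound/intel-nhlt.h> context. */
	static int query_ssp2_device_type(struct device *dev,
					  struct nhlt_acpi_table *nhlt)
	{
		int dev_type = intel_nhlt_ssp_device_type(dev, nhlt, 2 /* SSP2 */);

		if (dev_type < 0)	/* -EINVAL: no endpoint describes SSP2 */
			dev_dbg(dev, "no SSP2 endpoint in NHLT\n");

		return dev_type;
	}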
diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c
index 0ba8f0c4cd99..3a593da09280 100644
--- a/sound/oss/dmasound/dmasound_paula.c
+++ b/sound/oss/dmasound/dmasound_paula.c
@@ -725,7 +725,13 @@ static void __exit amiga_audio_remove(struct platform_device *pdev)
dmasound_deinit();
}
-static struct platform_driver amiga_audio_driver = {
+/*
+ * amiga_audio_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver amiga_audio_driver __refdata = {
.remove_new = __exit_p(amiga_audio_remove),
.driver = {
.name = "amiga-audio",
diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
index d36234b88fb4..941bfbf812ed 100644
--- a/sound/pci/emu10k1/emu10k1_callback.c
+++ b/sound/pci/emu10k1/emu10k1_callback.c
@@ -255,7 +255,7 @@ lookup_voices(struct snd_emux *emu, struct snd_emu10k1 *hw,
/* check if sample is finished playing (non-looping only) */
if (bp != best + V_OFF && bp != best + V_FREE &&
(vp->reg.sample_mode & SNDRV_SFNT_SAMPLE_SINGLESHOT)) {
- val = snd_emu10k1_ptr_read(hw, CCCA_CURRADDR, vp->ch) - 64;
+ val = snd_emu10k1_ptr_read(hw, CCCA_CURRADDR, vp->ch);
if (val >= vp->reg.loopstart)
bp = best + V_OFF;
}
@@ -362,7 +362,7 @@ start_voice(struct snd_emux_voice *vp)
map = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
- addr = vp->reg.start + 64;
+ addr = vp->reg.start;
temp = vp->reg.parm.filterQ;
ccca = (temp << 28) | addr;
if (vp->apitch < 0xe400)
@@ -430,9 +430,6 @@ start_voice(struct snd_emux_voice *vp)
/* Q & current address (Q 4bit value, MSB) */
CCCA, ccca,
- /* cache */
- CCR, REG_VAL_PUT(CCR_CACHEINVALIDSIZE, 64),
-
/* reset volume */
VTFT, vtarget | vp->ftarget,
CVCF, vtarget | CVCF_CURRENTFILTER_MASK,
diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
index 72ec872afb8d..8fb688e41414 100644
--- a/sound/pci/hda/cs35l41_hda_property.c
+++ b/sound/pci/hda/cs35l41_hda_property.c
@@ -108,7 +108,10 @@ static const struct cs35l41_config cs35l41_config_table[] = {
{ "10431F12", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
{ "10431F1F", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 0, 0, 0 },
{ "10431F62", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ { "10433A60", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
{ "17AA386F", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
+ { "17AA3877", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+ { "17AA3878", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
{ "17AA38A9", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
{ "17AA38AB", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
{ "17AA38B4", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
@@ -496,7 +499,10 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
{ "CSC3551", "10431F12", generic_dsd_config },
{ "CSC3551", "10431F1F", generic_dsd_config },
{ "CSC3551", "10431F62", generic_dsd_config },
+ { "CSC3551", "10433A60", generic_dsd_config },
{ "CSC3551", "17AA386F", generic_dsd_config },
+ { "CSC3551", "17AA3877", generic_dsd_config },
+ { "CSC3551", "17AA3878", generic_dsd_config },
{ "CSC3551", "17AA38A9", generic_dsd_config },
{ "CSC3551", "17AA38AB", generic_dsd_config },
{ "CSC3551", "17AA38B4", generic_dsd_config },
diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
index 41974b3897a7..1a3f84599cb5 100644
--- a/sound/pci/hda/cs35l56_hda.c
+++ b/sound/pci/hda/cs35l56_hda.c
@@ -1024,8 +1024,8 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
goto err;
}
- dev_dbg(cs35l56->base.dev, "DSP system name: '%s', amp name: '%s'\n",
- cs35l56->system_name, cs35l56->amp_name);
+ dev_info(cs35l56->base.dev, "DSP system name: '%s', amp name: '%s'\n",
+ cs35l56->system_name, cs35l56->amp_name);
regmap_multi_reg_write(cs35l56->base.regmap, cs35l56_hda_dai_config,
ARRAY_SIZE(cs35l56_hda_dai_config));
@@ -1045,14 +1045,14 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
pm_runtime_mark_last_busy(cs35l56->base.dev);
pm_runtime_enable(cs35l56->base.dev);
+ cs35l56->base.init_done = true;
+
ret = component_add(cs35l56->base.dev, &cs35l56_hda_comp_ops);
if (ret) {
dev_err(cs35l56->base.dev, "Register component failed: %d\n", ret);
goto pm_err;
}
- cs35l56->base.init_done = true;
-
return 0;
pm_err:
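The reordering above publishes init_done before component_add(); registration can bind the component and run its ops synchronously, so any state those ops consult has to be valid first. In outline (illustrative fragment mirroring the hunk):

	/*
	 * Publish-before-register, in miniature: anything the component ops
	 * read must be set before component_add(), because binding may
	 * happen inside that call.
	 */
	priv->init_done = true;			/* publish state first */
	ret = component_add(dev, &comp_ops);	/* ops may run immediately */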
diff --git a/sound/pci/hda/cs35l56_hda_i2c.c b/sound/pci/hda/cs35l56_hda_i2c.c
index 13beee807308..40f2f97944d5 100644
--- a/sound/pci/hda/cs35l56_hda_i2c.c
+++ b/sound/pci/hda/cs35l56_hda_i2c.c
@@ -56,10 +56,19 @@ static const struct i2c_device_id cs35l56_hda_i2c_id[] = {
{}
};
+static const struct acpi_device_id cs35l56_acpi_hda_match[] = {
+ { "CSC3554", 0 },
+ { "CSC3556", 0 },
+ { "CSC3557", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cs35l56_acpi_hda_match);
+
static struct i2c_driver cs35l56_hda_i2c_driver = {
.driver = {
- .name = "cs35l56-hda",
- .pm = &cs35l56_hda_pm_ops,
+ .name = "cs35l56-hda",
+ .acpi_match_table = cs35l56_acpi_hda_match,
+ .pm = &cs35l56_hda_pm_ops,
},
.id_table = cs35l56_hda_i2c_id,
.probe = cs35l56_hda_i2c_probe,
diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c
index a3b2fa76663d..7f02155fe61e 100644
--- a/sound/pci/hda/cs35l56_hda_spi.c
+++ b/sound/pci/hda/cs35l56_hda_spi.c
@@ -56,10 +56,19 @@ static const struct spi_device_id cs35l56_hda_spi_id[] = {
{}
};
+static const struct acpi_device_id cs35l56_acpi_hda_match[] = {
+ { "CSC3554", 0 },
+ { "CSC3556", 0 },
+ { "CSC3557", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cs35l56_acpi_hda_match);
+
static struct spi_driver cs35l56_hda_spi_driver = {
.driver = {
- .name = "cs35l56-hda",
- .pm = &cs35l56_hda_pm_ops,
+ .name = "cs35l56-hda",
+ .acpi_match_table = cs35l56_acpi_hda_match,
+ .pm = &cs35l56_hda_pm_ops,
},
.id_table = cs35l56_hda_spi_id,
.probe = cs35l56_hda_spi_probe,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a17c36a36aa5..70d80b6af3fe 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6875,11 +6875,38 @@ static void alc287_fixup_legion_16ithg6_speakers(struct hda_codec *cdc, const st
comp_generic_fixup(cdc, action, "i2c", "CLSA0101", "-%s:00-cs35l41-hda.%d", 2);
}
+static void cs35l56_fixup_i2c_two(struct hda_codec *cdc, const struct hda_fixup *fix, int action)
+{
+ comp_generic_fixup(cdc, action, "i2c", "CSC3556", "-%s:00-cs35l56-hda.%d", 2);
+}
+
+static void cs35l56_fixup_i2c_four(struct hda_codec *cdc, const struct hda_fixup *fix, int action)
+{
+ comp_generic_fixup(cdc, action, "i2c", "CSC3556", "-%s:00-cs35l56-hda.%d", 4);
+}
+
+static void cs35l56_fixup_spi_two(struct hda_codec *cdc, const struct hda_fixup *fix, int action)
+{
+ comp_generic_fixup(cdc, action, "spi", "CSC3556", "-%s:00-cs35l56-hda.%d", 2);
+}
+
static void cs35l56_fixup_spi_four(struct hda_codec *cdc, const struct hda_fixup *fix, int action)
{
comp_generic_fixup(cdc, action, "spi", "CSC3556", "-%s:00-cs35l56-hda.%d", 4);
}
+static void alc285_fixup_asus_ga403u(struct hda_codec *cdc, const struct hda_fixup *fix, int action)
+{
+ /*
+ * The same SSID has been re-used in different hardware; they have
+ * different codecs, and the newer GA403U has an ALC285.
+ */
+ if (cdc->core.vendor_id == 0x10ec0285)
+ cs35l56_fixup_i2c_two(cdc, fix, action);
+ else
+ alc_fixup_inv_dmic(cdc, fix, action);
+}
+
static void tas2781_fixup_i2c(struct hda_codec *cdc,
const struct hda_fixup *fix, int action)
{
@@ -7436,6 +7463,14 @@ enum {
ALC256_FIXUP_ACER_SFG16_MICMUTE_LED,
ALC256_FIXUP_HEADPHONE_AMP_VOL,
ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX,
+ ALC285_FIXUP_CS35L56_SPI_2,
+ ALC285_FIXUP_CS35L56_I2C_2,
+ ALC285_FIXUP_CS35L56_I2C_4,
+ ALC285_FIXUP_ASUS_GA403U,
+ ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC,
+ ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1,
+ ALC285_FIXUP_ASUS_GU605_SPI_2_HEADSET_MIC,
+ ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9643,6 +9678,54 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc245_fixup_hp_spectre_x360_eu0xxx,
},
+ [ALC285_FIXUP_CS35L56_SPI_2] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs35l56_fixup_spi_two,
+ },
+ [ALC285_FIXUP_CS35L56_I2C_2] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs35l56_fixup_i2c_two,
+ },
+ [ALC285_FIXUP_CS35L56_I2C_4] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs35l56_fixup_i2c_four,
+ },
+ [ALC285_FIXUP_ASUS_GA403U] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_asus_ga403u,
+ },
+ [ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11050 },
+ { 0x1b, 0x03a11c30 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1
+ },
+ [ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_speaker2_to_dac1,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_ASUS_GU605_SPI_2_HEADSET_MIC,
+ },
+ [ALC285_FIXUP_ASUS_GU605_SPI_2_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11050 },
+ { 0x1b, 0x03a11c30 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC285_FIXUP_CS35L56_SPI_2
+ },
+ [ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_speaker2_to_dac1,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_ASUS_GA403U,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -10037,6 +10120,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8cde, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ce0, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -10096,7 +10181,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
- SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1b13, "ASUS U41SV/GA403U", ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1c03, "ASUS UM3406HA", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10104,6 +10189,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1c33, "ASUS UX5304MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1c63, "ASUS GU605M", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
@@ -10115,11 +10201,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1df3, "ASUS UM5606", ALC285_FIXUP_CS35L56_I2C_4),
SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1e63, "ASUS H7606W", ALC285_FIXUP_CS35L56_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1e83, "ASUS GA605W", ALC285_FIXUP_CS35L56_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
@@ -10133,7 +10222,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
@@ -10159,7 +10248,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10ec, 0x12f6, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
- SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
+ SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP),
@@ -10177,6 +10266,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK),
+ SND_PCI_QUIRK(0x152d, 0x1262, "Huawei NBLB-WAX9N", ALC2XX_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1558, 0x0353, "Clevo V35[05]SN[CDE]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1325, "Clevo N15[01][CW]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -10282,6 +10372,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x2234, "Thinkpad ICE-1", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
@@ -10333,6 +10424,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x386f, "Legion 7i 16IAX7", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x3878, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x387d, "Yoga S780-16 pro Quad AAC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x387e, "Yoga S780-16 pro Quad YC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3881, "YB9 dual power mode2 YC", ALC287_FIXUP_TAS2781_I2C),
@@ -10341,8 +10434,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C),
- SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38b4, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b5, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b6, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10403,6 +10496,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1d05, 0x1147, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x1387, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 4475cea8e9f7..75f7674c66ee 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -89,7 +89,7 @@ struct tas2781_hda {
struct snd_kcontrol *dsp_prog_ctl;
struct snd_kcontrol *dsp_conf_ctl;
struct snd_kcontrol *prof_ctl;
- struct snd_kcontrol *snd_ctls[3];
+ struct snd_kcontrol *snd_ctls[2];
};
static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data)
@@ -161,8 +161,6 @@ static void tas2781_hda_playback_hook(struct device *dev, int action)
pm_runtime_put_autosuspend(dev);
break;
default:
- dev_dbg(tas_hda->dev, "Playback action not supported: %d\n",
- action);
break;
}
}
@@ -185,8 +183,15 @@ static int tasdevice_get_profile_id(struct snd_kcontrol *kcontrol,
{
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&tas_priv->codec_lock);
+
ucontrol->value.integer.value[0] = tas_priv->rcabin.profile_cfg_id;
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d\n",
+ __func__, kcontrol->id.name, tas_priv->rcabin.profile_cfg_id);
+
+ mutex_unlock(&tas_priv->codec_lock);
+
return 0;
}
@@ -200,11 +205,19 @@ static int tasdevice_set_profile_id(struct snd_kcontrol *kcontrol,
val = clamp(nr_profile, 0, max);
+ mutex_lock(&tas_priv->codec_lock);
+
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d -> %d\n",
+ __func__, kcontrol->id.name,
+ tas_priv->rcabin.profile_cfg_id, val);
+
if (tas_priv->rcabin.profile_cfg_id != val) {
tas_priv->rcabin.profile_cfg_id = val;
ret = 1;
}
+ mutex_unlock(&tas_priv->codec_lock);
+
return ret;
}
@@ -241,8 +254,15 @@ static int tasdevice_program_get(struct snd_kcontrol *kcontrol,
{
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&tas_priv->codec_lock);
+
ucontrol->value.integer.value[0] = tas_priv->cur_prog;
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d\n",
+ __func__, kcontrol->id.name, tas_priv->cur_prog);
+
+ mutex_unlock(&tas_priv->codec_lock);
+
return 0;
}
@@ -257,11 +277,18 @@ static int tasdevice_program_put(struct snd_kcontrol *kcontrol,
val = clamp(nr_program, 0, max);
+ mutex_lock(&tas_priv->codec_lock);
+
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d -> %d\n",
+ __func__, kcontrol->id.name, tas_priv->cur_prog, val);
+
if (tas_priv->cur_prog != val) {
tas_priv->cur_prog = val;
ret = 1;
}
+ mutex_unlock(&tas_priv->codec_lock);
+
return ret;
}
@@ -270,8 +297,15 @@ static int tasdevice_config_get(struct snd_kcontrol *kcontrol,
{
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&tas_priv->codec_lock);
+
ucontrol->value.integer.value[0] = tas_priv->cur_conf;
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d\n",
+ __func__, kcontrol->id.name, tas_priv->cur_conf);
+
+ mutex_unlock(&tas_priv->codec_lock);
+
return 0;
}
@@ -286,54 +320,39 @@ static int tasdevice_config_put(struct snd_kcontrol *kcontrol,
val = clamp(nr_config, 0, max);
+ mutex_lock(&tas_priv->codec_lock);
+
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d -> %d\n",
+ __func__, kcontrol->id.name, tas_priv->cur_conf, val);
+
if (tas_priv->cur_conf != val) {
tas_priv->cur_conf = val;
ret = 1;
}
+ mutex_unlock(&tas_priv->codec_lock);
+
return ret;
}
-/*
- * tas2781_digital_getvol - get the volum control
- * @kcontrol: control pointer
- * @ucontrol: User data
- * Customer Kcontrol for tas2781 is primarily for regmap booking, paging
- * depends on internal regmap mechanism.
- * tas2781 contains book and page two-level register map, especially
- * book switching will set the register BXXP00R7F, after switching to the
- * correct book, then leverage the mechanism for paging to access the
- * register.
- */
-static int tas2781_digital_getvol(struct snd_kcontrol *kcontrol,
+static int tas2781_amp_getvol(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
+ int ret;
- return tasdevice_digital_getvol(tas_priv, ucontrol, mc);
-}
+ mutex_lock(&tas_priv->codec_lock);
-static int tas2781_amp_getvol(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
+ ret = tasdevice_amp_getvol(tas_priv, ucontrol, mc);
- return tasdevice_amp_getvol(tas_priv, ucontrol, mc);
-}
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %ld\n",
+ __func__, kcontrol->id.name, ucontrol->value.integer.value[0]);
-static int tas2781_digital_putvol(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
+ mutex_unlock(&tas_priv->codec_lock);
- /* The check of the given value is in tasdevice_digital_putvol. */
- return tasdevice_digital_putvol(tas_priv, ucontrol, mc);
+ return ret;
}
static int tas2781_amp_putvol(struct snd_kcontrol *kcontrol,
@@ -342,9 +361,19 @@ static int tas2781_amp_putvol(struct snd_kcontrol *kcontrol,
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
+ int ret;
+
+ mutex_lock(&tas_priv->codec_lock);
+
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: -> %ld\n",
+ __func__, kcontrol->id.name, ucontrol->value.integer.value[0]);
/* The check of the given value is in tasdevice_amp_putvol. */
- return tasdevice_amp_putvol(tas_priv, ucontrol, mc);
+ ret = tasdevice_amp_putvol(tas_priv, ucontrol, mc);
+
+ mutex_unlock(&tas_priv->codec_lock);
+
+ return ret;
}
static int tas2781_force_fwload_get(struct snd_kcontrol *kcontrol,
@@ -352,9 +381,13 @@ static int tas2781_force_fwload_get(struct snd_kcontrol *kcontrol,
{
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&tas_priv->codec_lock);
+
ucontrol->value.integer.value[0] = (int)tas_priv->force_fwload_status;
- dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,
- tas_priv->force_fwload_status ? "ON" : "OFF");
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d\n",
+ __func__, kcontrol->id.name, tas_priv->force_fwload_status);
+
+ mutex_unlock(&tas_priv->codec_lock);
return 0;
}
@@ -365,14 +398,20 @@ static int tas2781_force_fwload_put(struct snd_kcontrol *kcontrol,
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
bool change, val = (bool)ucontrol->value.integer.value[0];
+ mutex_lock(&tas_priv->codec_lock);
+
+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d -> %d\n",
+ __func__, kcontrol->id.name,
+ tas_priv->force_fwload_status, val);
+
if (tas_priv->force_fwload_status == val)
change = false;
else {
change = true;
tas_priv->force_fwload_status = val;
}
- dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,
- tas_priv->force_fwload_status ? "ON" : "OFF");
+
+ mutex_unlock(&tas_priv->codec_lock);
return change;
}
@@ -381,9 +420,6 @@ static const struct snd_kcontrol_new tas2781_snd_controls[] = {
ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain", TAS2781_AMP_LEVEL,
1, 0, 20, 0, tas2781_amp_getvol,
tas2781_amp_putvol, amp_vol_tlv),
- ACARD_SINGLE_RANGE_EXT_TLV("Speaker Digital Gain", TAS2781_DVC_LVL,
- 0, 0, 200, 1, tas2781_digital_getvol,
- tas2781_digital_putvol, dvc_tlv),
ACARD_SINGLE_BOOL_EXT("Speaker Force Firmware Load", 0,
tas2781_force_fwload_get, tas2781_force_fwload_put),
};
@@ -478,10 +514,10 @@ static int tas2563_save_calibration(struct tasdevice_priv *tas_priv)
static void tas2781_apply_calib(struct tasdevice_priv *tas_priv)
{
static const unsigned char page_array[CALIB_MAX] = {
- 0x17, 0x18, 0x18, 0x0d, 0x18
+ 0x17, 0x18, 0x18, 0x13, 0x18,
};
static const unsigned char rgno_array[CALIB_MAX] = {
- 0x74, 0x0c, 0x14, 0x3c, 0x7c
+ 0x74, 0x0c, 0x14, 0x70, 0x7c,
};
unsigned char *data;
int i, j, rc;
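The tas2781 hunks above wrap every kcontrol get/put in codec_lock while keeping ALSA's put-callback contract of returning 1 on change and 0 otherwise. A self-contained userspace sketch of that locked read-compare-write (names illustrative):

	#include <pthread.h>

	struct tas_state {
		pthread_mutex_t codec_lock;
		int cur_prog;
	};

	/* Returns 1 if the value changed, 0 if not -- the ALSA put convention. */
	static int program_put(struct tas_state *s, int val)
	{
		int changed = 0;

		pthread_mutex_lock(&s->codec_lock);
		if (s->cur_prog != val) {
			s->cur_prog = val;
			changed = 1;
		}
		pthread_mutex_unlock(&s->codec_lock);
		return changed;
	}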
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index 320ac792c7fe..3182c634464d 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -278,7 +278,8 @@ static void run_spu_dma(struct work_struct *work)
dreamcastcard->clicks++;
if (unlikely(dreamcastcard->clicks >= AICA_PERIOD_NUMBER))
dreamcastcard->clicks %= AICA_PERIOD_NUMBER;
- mod_timer(&dreamcastcard->timer, jiffies + 1);
+ if (snd_pcm_running(dreamcastcard->substream))
+ mod_timer(&dreamcastcard->timer, jiffies + 1);
}
}
@@ -290,6 +291,8 @@ static void aica_period_elapsed(struct timer_list *t)
/*timer function - so cannot sleep */
int play_period;
struct snd_pcm_runtime *runtime;
+ if (!snd_pcm_running(substream))
+ return;
runtime = substream->runtime;
dreamcastcard = substream->pcm->private_data;
/* Have we played out an additional period? */
@@ -350,12 +353,19 @@ static int snd_aicapcm_pcm_open(struct snd_pcm_substream
return 0;
}
+static int snd_aicapcm_pcm_sync_stop(struct snd_pcm_substream *substream)
+{
+ struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
+
+ del_timer_sync(&dreamcastcard->timer);
+ cancel_work_sync(&dreamcastcard->spu_dma_work);
+ return 0;
+}
+
static int snd_aicapcm_pcm_close(struct snd_pcm_substream
*substream)
{
struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
- flush_work(&(dreamcastcard->spu_dma_work));
- del_timer(&dreamcastcard->timer);
dreamcastcard->substream = NULL;
kfree(dreamcastcard->channel);
spu_disable();
@@ -401,6 +411,7 @@ static const struct snd_pcm_ops snd_aicapcm_playback_ops = {
.prepare = snd_aicapcm_pcm_prepare,
.trigger = snd_aicapcm_pcm_trigger,
.pointer = snd_aicapcm_pcm_pointer,
+ .sync_stop = snd_aicapcm_pcm_sync_stop,
};
/* TO DO: set up to handle more than one pcm instance */
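In the aica change above, the timer and the DMA work can re-arm each other, so teardown has to stop the re-arming first (the snd_pcm_running() guards) and then cancel each side synchronously. A sketch of that shutdown order using the real timer/workqueue APIs (the struct fields are illustrative):

	#include <linux/timer.h>
	#include <linux/workqueue.h>

	struct card_state {
		struct timer_list timer;
		struct work_struct spu_dma_work;
	};

	static void card_sync_stop(struct card_state *c)
	{
		/* Guards in the handlers stop the re-arm cycle; then: */
		del_timer_sync(&c->timer);		/* waits out a running timer */
		cancel_work_sync(&c->spu_dma_work);	/* waits out queued/running work */
	}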
diff --git a/sound/soc/amd/acp/acp-pci.c b/sound/soc/amd/acp/acp-pci.c
index 8c8b1dcac628..5f35b90eab8d 100644
--- a/sound/soc/amd/acp/acp-pci.c
+++ b/sound/soc/amd/acp/acp-pci.c
@@ -115,7 +115,10 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
goto unregister_dmic_dev;
}
- acp_init(chip);
+ ret = acp_init(chip);
+ if (ret)
+ goto unregister_dmic_dev;
+
res = devm_kcalloc(&pci->dev, num_res, sizeof(struct resource), GFP_KERNEL);
if (!res) {
ret = -ENOMEM;
@@ -133,11 +136,9 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
}
}
- if (flag == FLAG_AMD_LEGACY_ONLY_DMIC) {
- ret = check_acp_pdm(pci, chip);
- if (ret < 0)
- goto skip_pdev_creation;
- }
+ ret = check_acp_pdm(pci, chip);
+ if (ret < 0)
+ goto skip_pdev_creation;
chip->flag = flag;
memset(&pdevinfo, 0, sizeof(pdevinfo));
diff --git a/sound/soc/codecs/cs-amp-lib.c b/sound/soc/codecs/cs-amp-lib.c
index 01ef4db5407d..287ac01a3873 100644
--- a/sound/soc/codecs/cs-amp-lib.c
+++ b/sound/soc/codecs/cs-amp-lib.c
@@ -56,6 +56,11 @@ static int _cs_amp_write_cal_coeffs(struct cs_dsp *dsp,
dev_dbg(dsp->dev, "Calibration: Ambient=%#x, Status=%#x, CalR=%d\n",
data->calAmbient, data->calStatus, data->calR);
+ if (list_empty(&dsp->ctl_list)) {
+ dev_info(dsp->dev, "Calibration disabled due to missing firmware controls\n");
+ return -ENOENT;
+ }
+
ret = cs_amp_write_cal_coeff(dsp, controls, controls->ambient, data->calAmbient);
if (ret)
return ret;
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 860d5cda67bf..94685449f0f4 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -2364,7 +2364,8 @@ static int cs42l43_codec_runtime_resume(struct device *dev)
static int cs42l43_codec_suspend(struct device *dev)
{
- struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+ struct cs42l43_codec *priv = dev_get_drvdata(dev);
+ struct cs42l43 *cs42l43 = priv->core;
disable_irq(cs42l43->irq);
@@ -2373,7 +2374,8 @@ static int cs42l43_codec_suspend(struct device *dev)
static int cs42l43_codec_suspend_noirq(struct device *dev)
{
- struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+ struct cs42l43_codec *priv = dev_get_drvdata(dev);
+ struct cs42l43 *cs42l43 = priv->core;
enable_irq(cs42l43->irq);
@@ -2382,7 +2384,8 @@ static int cs42l43_codec_suspend_noirq(struct device *dev)
static int cs42l43_codec_resume(struct device *dev)
{
- struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+ struct cs42l43_codec *priv = dev_get_drvdata(dev);
+ struct cs42l43 *cs42l43 = priv->core;
enable_irq(cs42l43->irq);
@@ -2391,7 +2394,8 @@ static int cs42l43_codec_resume(struct device *dev)
static int cs42l43_codec_resume_noirq(struct device *dev)
{
- struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+ struct cs42l43_codec *priv = dev_get_drvdata(dev);
+ struct cs42l43 *cs42l43 = priv->core;
disable_irq(cs42l43->irq);
diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
index 15289dadafea..17bd6b516077 100644
--- a/sound/soc/codecs/es8326.c
+++ b/sound/soc/codecs/es8326.c
@@ -412,9 +412,9 @@ static const struct _coeff_div coeff_div_v3[] = {
{125, 48000, 6000000, 0x04, 0x04, 0x1F, 0x2D, 0x8A, 0x0A, 0x27, 0x27},
{128, 8000, 1024000, 0x60, 0x00, 0x05, 0x75, 0x8A, 0x1B, 0x1F, 0x7F},
- {128, 16000, 2048000, 0x20, 0x00, 0x31, 0x35, 0x8A, 0x1B, 0x1F, 0x3F},
- {128, 44100, 5644800, 0xE0, 0x00, 0x01, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F},
- {128, 48000, 6144000, 0xE0, 0x00, 0x01, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F},
+ {128, 16000, 2048000, 0x20, 0x00, 0x31, 0x35, 0x08, 0x19, 0x1F, 0x3F},
+ {128, 44100, 5644800, 0xE0, 0x00, 0x01, 0x2D, 0x48, 0x08, 0x1F, 0x1F},
+ {128, 48000, 6144000, 0xE0, 0x00, 0x01, 0x2D, 0x48, 0x08, 0x1F, 0x1F},
{144, 8000, 1152000, 0x20, 0x00, 0x03, 0x35, 0x8A, 0x1B, 0x23, 0x47},
{144, 16000, 2304000, 0x20, 0x00, 0x11, 0x35, 0x8A, 0x1B, 0x23, 0x47},
{192, 8000, 1536000, 0x60, 0x02, 0x0D, 0x75, 0x8A, 0x1B, 0x1F, 0x7F},
@@ -423,10 +423,10 @@ static const struct _coeff_div coeff_div_v3[] = {
{200, 48000, 9600000, 0x04, 0x04, 0x0F, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F},
{250, 48000, 12000000, 0x04, 0x04, 0x0F, 0x2D, 0xCA, 0x0A, 0x27, 0x27},
- {256, 8000, 2048000, 0x60, 0x00, 0x31, 0x35, 0x8A, 0x1B, 0x1F, 0x7F},
- {256, 16000, 4096000, 0x20, 0x00, 0x01, 0x35, 0x8A, 0x1B, 0x1F, 0x3F},
- {256, 44100, 11289600, 0xE0, 0x00, 0x30, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F},
- {256, 48000, 12288000, 0xE0, 0x00, 0x30, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F},
+ {256, 8000, 2048000, 0x60, 0x00, 0x31, 0x35, 0x08, 0x19, 0x1F, 0x7F},
+ {256, 16000, 4096000, 0x20, 0x00, 0x01, 0x35, 0x08, 0x19, 0x1F, 0x3F},
+ {256, 44100, 11289600, 0xE0, 0x01, 0x01, 0x2D, 0x48, 0x08, 0x1F, 0x1F},
+ {256, 48000, 12288000, 0xE0, 0x01, 0x01, 0x2D, 0x48, 0x08, 0x1F, 0x1F},
{288, 8000, 2304000, 0x20, 0x00, 0x01, 0x35, 0x8A, 0x1B, 0x23, 0x47},
{384, 8000, 3072000, 0x60, 0x02, 0x05, 0x75, 0x8A, 0x1B, 0x1F, 0x7F},
{384, 16000, 6144000, 0x20, 0x02, 0x03, 0x35, 0x8A, 0x1B, 0x1F, 0x3F},
@@ -435,10 +435,10 @@ static const struct _coeff_div coeff_div_v3[] = {
{400, 48000, 19200000, 0xE4, 0x04, 0x35, 0x6d, 0xCA, 0x0A, 0x1F, 0x1F},
{500, 48000, 24000000, 0xF8, 0x04, 0x3F, 0x6D, 0xCA, 0x0A, 0x1F, 0x1F},
- {512, 8000, 4096000, 0x60, 0x00, 0x01, 0x35, 0x8A, 0x1B, 0x1F, 0x7F},
- {512, 16000, 8192000, 0x20, 0x00, 0x30, 0x35, 0x8A, 0x1B, 0x1F, 0x3F},
- {512, 44100, 22579200, 0xE0, 0x00, 0x00, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F},
- {512, 48000, 24576000, 0xE0, 0x00, 0x00, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F},
+ {512, 8000, 4096000, 0x60, 0x00, 0x01, 0x08, 0x19, 0x1B, 0x1F, 0x7F},
+ {512, 16000, 8192000, 0x20, 0x00, 0x30, 0x35, 0x08, 0x19, 0x1F, 0x3F},
+ {512, 44100, 22579200, 0xE0, 0x00, 0x00, 0x2D, 0x48, 0x08, 0x1F, 0x1F},
+ {512, 48000, 24576000, 0xE0, 0x00, 0x00, 0x2D, 0x48, 0x08, 0x1F, 0x1F},
{768, 8000, 6144000, 0x60, 0x02, 0x11, 0x35, 0x8A, 0x1B, 0x1F, 0x7F},
{768, 16000, 12288000, 0x20, 0x02, 0x01, 0x35, 0x8A, 0x1B, 0x1F, 0x3F},
{768, 32000, 24576000, 0xE0, 0x02, 0x30, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F},
@@ -835,7 +835,6 @@ static void es8326_jack_detect_handler(struct work_struct *work)
dev_dbg(comp->dev, "Report hp remove event\n");
snd_soc_jack_report(es8326->jack, 0, SND_JACK_HEADSET);
/* mute adc when mic path switch */
- regmap_write(es8326->regmap, ES8326_ADC_SCALE, 0x33);
regmap_write(es8326->regmap, ES8326_ADC1_SRC, 0x44);
regmap_write(es8326->regmap, ES8326_ADC2_SRC, 0x66);
es8326->hp = 0;
@@ -843,6 +842,7 @@ static void es8326_jack_detect_handler(struct work_struct *work)
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01);
regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x0a);
regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x03);
+ regmap_write(es8326->regmap, ES8326_INT_SOURCE, ES8326_INT_SRC_PIN9);
/*
* Inverted HPJACK_POL bit to trigger one IRQ to double check HP Removal event
*/
@@ -865,6 +865,8 @@ static void es8326_jack_detect_handler(struct work_struct *work)
* set auto-check mode, then restart jack_detect_work after 400ms.
* Don't report jack status.
*/
+ regmap_write(es8326->regmap, ES8326_INT_SOURCE,
+ (ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01);
es8326_enable_micbias(es8326->component);
usleep_range(50000, 70000);
@@ -891,7 +893,6 @@ static void es8326_jack_detect_handler(struct work_struct *work)
snd_soc_jack_report(es8326->jack,
SND_JACK_HEADSET, SND_JACK_HEADSET);
- regmap_write(es8326->regmap, ES8326_ADC_SCALE, 0x33);
regmap_update_bits(es8326->regmap, ES8326_PGA_PDN,
0x08, 0x08);
regmap_update_bits(es8326->regmap, ES8326_PGAGAIN,
@@ -987,7 +988,7 @@ static int es8326_resume(struct snd_soc_component *component)
regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x0E);
regmap_write(es8326->regmap, ES8326_ANA_LP, 0xf0);
usleep_range(10000, 15000);
- regmap_write(es8326->regmap, ES8326_HPJACK_TIMER, 0xe9);
+ regmap_write(es8326->regmap, ES8326_HPJACK_TIMER, 0xd9);
regmap_write(es8326->regmap, ES8326_ANA_MICBIAS, 0xcb);
/* set headphone default type and detect pin */
regmap_write(es8326->regmap, ES8326_HPDET_TYPE, 0x83);
@@ -1038,8 +1039,7 @@ static int es8326_resume(struct snd_soc_component *component)
es8326_enable_micbias(es8326->component);
usleep_range(50000, 70000);
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x00);
- regmap_write(es8326->regmap, ES8326_INT_SOURCE,
- (ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
+ regmap_write(es8326->regmap, ES8326_INT_SOURCE, ES8326_INT_SRC_PIN9);
regmap_write(es8326->regmap, ES8326_INTOUT_IO,
es8326->interrupt_clk);
regmap_write(es8326->regmap, ES8326_SDINOUT1_IO,
@@ -1060,6 +1060,8 @@ static int es8326_resume(struct snd_soc_component *component)
es8326->hp = 0;
es8326->hpl_vol = 0x03;
es8326->hpr_vol = 0x03;
+
+ es8326_irq(es8326->irq, es8326);
return 0;
}
@@ -1070,6 +1072,9 @@ static int es8326_suspend(struct snd_soc_component *component)
cancel_delayed_work_sync(&es8326->jack_detect_work);
es8326_disable_micbias(component);
es8326->calibrated = false;
+ regmap_write(es8326->regmap, ES8326_CLK_MUX, 0x2d);
+ regmap_write(es8326->regmap, ES8326_DAC2HPMIX, 0x00);
+ regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x3b);
regmap_write(es8326->regmap, ES8326_CLK_CTL, ES8326_CLK_OFF);
regcache_cache_only(es8326->regmap, true);
regcache_mark_dirty(es8326->regmap);
diff --git a/sound/soc/codecs/es8326.h b/sound/soc/codecs/es8326.h
index ee12caef8105..c3e52e7bdef5 100644
--- a/sound/soc/codecs/es8326.h
+++ b/sound/soc/codecs/es8326.h
@@ -104,7 +104,7 @@
#define ES8326_MUTE (3 << 0)
/* ES8326_CLK_CTL */
-#define ES8326_CLK_ON (0x7e << 0)
+#define ES8326_CLK_ON (0x7f << 0)
#define ES8326_CLK_OFF (0 << 0)
/* ES8326_CLK_INV */
diff --git a/sound/soc/codecs/rt1316-sdw.c b/sound/soc/codecs/rt1316-sdw.c
index 47511f70119a..0b3bf920bcab 100644
--- a/sound/soc/codecs/rt1316-sdw.c
+++ b/sound/soc/codecs/rt1316-sdw.c
@@ -537,7 +537,7 @@ static int rt1316_sdw_hw_params(struct snd_pcm_substream *substream,
retval = sdw_stream_add_slave(rt1316->sdw_slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
@@ -577,12 +577,12 @@ static int rt1316_sdw_parse_dt(struct rt1316_sdw_priv *rt1316, struct device *de
if (rt1316->bq_params_cnt) {
rt1316->bq_params = devm_kzalloc(dev, rt1316->bq_params_cnt, GFP_KERNEL);
if (!rt1316->bq_params) {
- dev_err(dev, "Could not allocate bq_params memory\n");
+ dev_err(dev, "%s: Could not allocate bq_params memory\n", __func__);
ret = -ENOMEM;
} else {
ret = device_property_read_u8_array(dev, "realtek,bq-params", rt1316->bq_params, rt1316->bq_params_cnt);
if (ret < 0)
- dev_err(dev, "Could not read list of realtek,bq-params\n");
+ dev_err(dev, "%s: Could not read list of realtek,bq-params\n", __func__);
}
}
@@ -759,7 +759,7 @@ static int __maybe_unused rt1316_dev_resume(struct device *dev)
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT1316_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ dev_err(&slave->dev, "%s: Initialization not complete, timed out\n", __func__);
sdw_show_ping_status(slave->bus, true);
return -ETIMEDOUT;
diff --git a/sound/soc/codecs/rt1318-sdw.c b/sound/soc/codecs/rt1318-sdw.c
index ff364bde4a08..462c9a4b1be5 100644
--- a/sound/soc/codecs/rt1318-sdw.c
+++ b/sound/soc/codecs/rt1318-sdw.c
@@ -606,7 +606,7 @@ static int rt1318_sdw_hw_params(struct snd_pcm_substream *substream,
retval = sdw_stream_add_slave(rt1318->sdw_slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
@@ -631,8 +631,8 @@ static int rt1318_sdw_hw_params(struct snd_pcm_substream *substream,
sampling_rate = RT1318_SDCA_RATE_192000HZ;
break;
default:
- dev_err(component->dev, "Rate %d is not supported\n",
- params_rate(params));
+ dev_err(component->dev, "%s: Rate %d is not supported\n",
+ __func__, params_rate(params));
return -EINVAL;
}
@@ -835,7 +835,7 @@ static int __maybe_unused rt1318_dev_resume(struct device *dev)
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT1318_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ dev_err(&slave->dev, "%s: Initialization not complete, timed out\n", __func__);
return -ETIMEDOUT;
}
diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
index e67c2e19cb1a..f9ee42c13dba 100644
--- a/sound/soc/codecs/rt5682-sdw.c
+++ b/sound/soc/codecs/rt5682-sdw.c
@@ -132,7 +132,7 @@ static int rt5682_sdw_hw_params(struct snd_pcm_substream *substream,
retval = sdw_stream_add_slave(rt5682->slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
@@ -315,8 +315,8 @@ static int rt5682_sdw_init(struct device *dev, struct regmap *regmap,
&rt5682_sdw_indirect_regmap);
if (IS_ERR(rt5682->regmap)) {
ret = PTR_ERR(rt5682->regmap);
- dev_err(dev, "Failed to allocate register map: %d\n",
- ret);
+ dev_err(dev, "%s: Failed to allocate register map: %d\n",
+ __func__, ret);
return ret;
}
@@ -400,7 +400,7 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
}
if (val != DEVICE_ID) {
- dev_err(dev, "Device with ID register %x is not rt5682\n", val);
+ dev_err(dev, "%s: Device with ID register %x is not rt5682\n", __func__, val);
ret = -ENODEV;
goto err_nodev;
}
@@ -648,7 +648,7 @@ static int rt5682_bus_config(struct sdw_slave *slave,
ret = rt5682_clock_config(&slave->dev);
if (ret < 0)
- dev_err(&slave->dev, "Invalid clk config");
+ dev_err(&slave->dev, "%s: Invalid clk config", __func__);
return ret;
}
@@ -763,19 +763,19 @@ static int __maybe_unused rt5682_dev_resume(struct device *dev)
return 0;
if (!slave->unattach_request) {
+ mutex_lock(&rt5682->disable_irq_lock);
if (rt5682->disable_irq == true) {
- mutex_lock(&rt5682->disable_irq_lock);
sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
rt5682->disable_irq = false;
- mutex_unlock(&rt5682->disable_irq_lock);
}
+ mutex_unlock(&rt5682->disable_irq_lock);
goto regmap_sync;
}
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT5682_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ dev_err(&slave->dev, "%s: Initialization not complete, timed out\n", __func__);
sdw_show_ping_status(slave->bus, true);
return -ETIMEDOUT;
diff --git a/sound/soc/codecs/rt700.c b/sound/soc/codecs/rt700.c
index 0ebf344a1b60..434b926f96c8 100644
--- a/sound/soc/codecs/rt700.c
+++ b/sound/soc/codecs/rt700.c
@@ -37,8 +37,8 @@ static int rt700_index_write(struct regmap *regmap,
ret = regmap_write(regmap, addr, value);
if (ret < 0)
- pr_err("Failed to set private value: %06x <= %04x ret=%d\n",
- addr, value, ret);
+ pr_err("%s: Failed to set private value: %06x <= %04x ret=%d\n",
+ __func__, addr, value, ret);
return ret;
}
@@ -52,8 +52,8 @@ static int rt700_index_read(struct regmap *regmap,
*value = 0;
ret = regmap_read(regmap, addr, value);
if (ret < 0)
- pr_err("Failed to get private value: %06x => %04x ret=%d\n",
- addr, *value, ret);
+ pr_err("%s: Failed to get private value: %06x => %04x ret=%d\n",
+ __func__, addr, *value, ret);
return ret;
}
@@ -930,14 +930,14 @@ static int rt700_pcm_hw_params(struct snd_pcm_substream *substream,
port_config.num += 2;
break;
default:
- dev_err(component->dev, "Invalid DAI id %d\n", dai->id);
+ dev_err(component->dev, "%s: Invalid DAI id %d\n", __func__, dai->id);
return -EINVAL;
}
retval = sdw_stream_add_slave(rt700->slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
@@ -945,8 +945,8 @@ static int rt700_pcm_hw_params(struct snd_pcm_substream *substream,
/* bit 3:0 Number of Channel */
val |= (params_channels(params) - 1);
} else {
- dev_err(component->dev, "Unsupported channels %d\n",
- params_channels(params));
+ dev_err(component->dev, "%s: Unsupported channels %d\n",
+ __func__, params_channels(params));
return -EINVAL;
}
diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
index 935e597022d3..2636c2eea4bc 100644
--- a/sound/soc/codecs/rt711-sdca-sdw.c
+++ b/sound/soc/codecs/rt711-sdca-sdw.c
@@ -438,20 +438,20 @@ static int __maybe_unused rt711_sdca_dev_resume(struct device *dev)
return 0;
if (!slave->unattach_request) {
+ mutex_lock(&rt711->disable_irq_lock);
if (rt711->disable_irq == true) {
- mutex_lock(&rt711->disable_irq_lock);
sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0);
sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
rt711->disable_irq = false;
- mutex_unlock(&rt711->disable_irq_lock);
}
+ mutex_unlock(&rt711->disable_irq_lock);
goto regmap_sync;
}
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT711_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ dev_err(&slave->dev, "%s: Initialization not complete, timed out\n", __func__);
sdw_show_ping_status(slave->bus, true);
return -ETIMEDOUT;
diff --git a/sound/soc/codecs/rt711-sdca.c b/sound/soc/codecs/rt711-sdca.c
index 447154cb6010..1e8dbfc3ecd9 100644
--- a/sound/soc/codecs/rt711-sdca.c
+++ b/sound/soc/codecs/rt711-sdca.c
@@ -36,8 +36,8 @@ static int rt711_sdca_index_write(struct rt711_sdca_priv *rt711,
ret = regmap_write(regmap, addr, value);
if (ret < 0)
dev_err(&rt711->slave->dev,
- "Failed to set private value: %06x <= %04x ret=%d\n",
- addr, value, ret);
+ "%s: Failed to set private value: %06x <= %04x ret=%d\n",
+ __func__, addr, value, ret);
return ret;
}
@@ -52,8 +52,8 @@ static int rt711_sdca_index_read(struct rt711_sdca_priv *rt711,
ret = regmap_read(regmap, addr, value);
if (ret < 0)
dev_err(&rt711->slave->dev,
- "Failed to get private value: %06x => %04x ret=%d\n",
- addr, *value, ret);
+ "%s: Failed to get private value: %06x => %04x ret=%d\n",
+ __func__, addr, *value, ret);
return ret;
}
@@ -1293,13 +1293,13 @@ static int rt711_sdca_pcm_hw_params(struct snd_pcm_substream *substream,
retval = sdw_stream_add_slave(rt711->slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
if (params_channels(params) > 16) {
- dev_err(component->dev, "Unsupported channels %d\n",
- params_channels(params));
+ dev_err(component->dev, "%s: Unsupported channels %d\n",
+ __func__, params_channels(params));
return -EINVAL;
}
@@ -1318,8 +1318,8 @@ static int rt711_sdca_pcm_hw_params(struct snd_pcm_substream *substream,
sampling_rate = RT711_SDCA_RATE_192000HZ;
break;
default:
- dev_err(component->dev, "Rate %d is not supported\n",
- params_rate(params));
+ dev_err(component->dev, "%s: Rate %d is not supported\n",
+ __func__, params_rate(params));
return -EINVAL;
}
diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
index 3f5773310ae8..0d3b43dd22e6 100644
--- a/sound/soc/codecs/rt711-sdw.c
+++ b/sound/soc/codecs/rt711-sdw.c
@@ -408,7 +408,7 @@ static int rt711_bus_config(struct sdw_slave *slave,
ret = rt711_clock_config(&slave->dev);
if (ret < 0)
- dev_err(&slave->dev, "Invalid clk config");
+ dev_err(&slave->dev, "%s: Invalid clk config", __func__);
return ret;
}
@@ -536,19 +536,19 @@ static int __maybe_unused rt711_dev_resume(struct device *dev)
return 0;
if (!slave->unattach_request) {
+ mutex_lock(&rt711->disable_irq_lock);
if (rt711->disable_irq == true) {
- mutex_lock(&rt711->disable_irq_lock);
sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
rt711->disable_irq = false;
- mutex_unlock(&rt711->disable_irq_lock);
}
+ mutex_unlock(&rt711->disable_irq_lock);
goto regmap_sync;
}
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT711_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ dev_err(&slave->dev, "%s: Initialization not complete, timed out\n", __func__);
return -ETIMEDOUT;
}
diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
index 66eaed13b0d6..5446f9506a16 100644
--- a/sound/soc/codecs/rt711.c
+++ b/sound/soc/codecs/rt711.c
@@ -37,8 +37,8 @@ static int rt711_index_write(struct regmap *regmap,
ret = regmap_write(regmap, addr, value);
if (ret < 0)
- pr_err("Failed to set private value: %06x <= %04x ret=%d\n",
- addr, value, ret);
+ pr_err("%s: Failed to set private value: %06x <= %04x ret=%d\n",
+ __func__, addr, value, ret);
return ret;
}
@@ -52,8 +52,8 @@ static int rt711_index_read(struct regmap *regmap,
*value = 0;
ret = regmap_read(regmap, addr, value);
if (ret < 0)
- pr_err("Failed to get private value: %06x => %04x ret=%d\n",
- addr, *value, ret);
+ pr_err("%s: Failed to get private value: %06x => %04x ret=%d\n",
+ __func__, addr, *value, ret);
return ret;
}
@@ -428,7 +428,7 @@ static void rt711_jack_init(struct rt711_priv *rt711)
RT711_HP_JD_FINAL_RESULT_CTL_JD12);
break;
default:
- dev_warn(rt711->component->dev, "Wrong JD source\n");
+ dev_warn(rt711->component->dev, "%s: Wrong JD source\n", __func__);
break;
}
@@ -1020,7 +1020,7 @@ static int rt711_pcm_hw_params(struct snd_pcm_substream *substream,
retval = sdw_stream_add_slave(rt711->slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
@@ -1028,8 +1028,8 @@ static int rt711_pcm_hw_params(struct snd_pcm_substream *substream,
/* bit 3:0 Number of Channel */
val |= (params_channels(params) - 1);
} else {
- dev_err(component->dev, "Unsupported channels %d\n",
- params_channels(params));
+ dev_err(component->dev, "%s: Unsupported channels %d\n",
+ __func__, params_channels(params));
return -EINVAL;
}
diff --git a/sound/soc/codecs/rt712-sdca-dmic.c b/sound/soc/codecs/rt712-sdca-dmic.c
index 0926b26619bd..012b79e72cf6 100644
--- a/sound/soc/codecs/rt712-sdca-dmic.c
+++ b/sound/soc/codecs/rt712-sdca-dmic.c
@@ -139,8 +139,8 @@ static int rt712_sdca_dmic_index_write(struct rt712_sdca_dmic_priv *rt712,
ret = regmap_write(regmap, addr, value);
if (ret < 0)
dev_err(&rt712->slave->dev,
- "Failed to set private value: %06x <= %04x ret=%d\n",
- addr, value, ret);
+ "%s: Failed to set private value: %06x <= %04x ret=%d\n",
+ __func__, addr, value, ret);
return ret;
}
@@ -155,8 +155,8 @@ static int rt712_sdca_dmic_index_read(struct rt712_sdca_dmic_priv *rt712,
ret = regmap_read(regmap, addr, value);
if (ret < 0)
dev_err(&rt712->slave->dev,
- "Failed to get private value: %06x => %04x ret=%d\n",
- addr, *value, ret);
+ "%s: Failed to get private value: %06x => %04x ret=%d\n",
+ __func__, addr, *value, ret);
return ret;
}
@@ -317,7 +317,8 @@ static int rt712_sdca_dmic_set_gain_put(struct snd_kcontrol *kcontrol,
for (i = 0; i < p->count; i++) {
err = regmap_write(rt712->mbq_regmap, p->reg_base + i, gain_val[i]);
if (err < 0)
- dev_err(&rt712->slave->dev, "0x%08x can't be set\n", p->reg_base + i);
+ dev_err(&rt712->slave->dev, "%s: 0x%08x can't be set\n",
+ __func__, p->reg_base + i);
}
return changed;
@@ -667,13 +668,13 @@ static int rt712_sdca_dmic_hw_params(struct snd_pcm_substream *substream,
retval = sdw_stream_add_slave(rt712->slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
if (params_channels(params) > 4) {
- dev_err(component->dev, "Unsupported channels %d\n",
- params_channels(params));
+ dev_err(component->dev, "%s: Unsupported channels %d\n",
+ __func__, params_channels(params));
return -EINVAL;
}
@@ -698,8 +699,8 @@ static int rt712_sdca_dmic_hw_params(struct snd_pcm_substream *substream,
sampling_rate = RT712_SDCA_RATE_192000HZ;
break;
default:
- dev_err(component->dev, "Rate %d is not supported\n",
- params_rate(params));
+ dev_err(component->dev, "%s: Rate %d is not supported\n",
+ __func__, params_rate(params));
return -EINVAL;
}
@@ -923,7 +924,8 @@ static int __maybe_unused rt712_sdca_dmic_dev_resume(struct device *dev)
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT712_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ dev_err(&slave->dev, "%s: Initialization not complete, timed out\n",
+ __func__);
sdw_show_ping_status(slave->bus, true);
return -ETIMEDOUT;
diff --git a/sound/soc/codecs/rt712-sdca-sdw.c b/sound/soc/codecs/rt712-sdca-sdw.c
index 01ac555cd79b..4e9ab3ef135b 100644
--- a/sound/soc/codecs/rt712-sdca-sdw.c
+++ b/sound/soc/codecs/rt712-sdca-sdw.c
@@ -438,20 +438,21 @@ static int __maybe_unused rt712_sdca_dev_resume(struct device *dev)
return 0;
if (!slave->unattach_request) {
+ mutex_lock(&rt712->disable_irq_lock);
if (rt712->disable_irq == true) {
- mutex_lock(&rt712->disable_irq_lock);
+
sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0);
sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
rt712->disable_irq = false;
- mutex_unlock(&rt712->disable_irq_lock);
}
+ mutex_unlock(&rt712->disable_irq_lock);
goto regmap_sync;
}
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT712_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ dev_err(&slave->dev, "%s: Initialization not complete, timed out\n", __func__);
sdw_show_ping_status(slave->bus, true);
return -ETIMEDOUT;
diff --git a/sound/soc/codecs/rt712-sdca.c b/sound/soc/codecs/rt712-sdca.c
index 6954fbe7ec5f..b503de9fda80 100644
--- a/sound/soc/codecs/rt712-sdca.c
+++ b/sound/soc/codecs/rt712-sdca.c
@@ -34,8 +34,8 @@ static int rt712_sdca_index_write(struct rt712_sdca_priv *rt712,
ret = regmap_write(regmap, addr, value);
if (ret < 0)
dev_err(&rt712->slave->dev,
- "Failed to set private value: %06x <= %04x ret=%d\n",
- addr, value, ret);
+ "%s: Failed to set private value: %06x <= %04x ret=%d\n",
+ __func__, addr, value, ret);
return ret;
}
@@ -50,8 +50,8 @@ static int rt712_sdca_index_read(struct rt712_sdca_priv *rt712,
ret = regmap_read(regmap, addr, value);
if (ret < 0)
dev_err(&rt712->slave->dev,
- "Failed to get private value: %06x => %04x ret=%d\n",
- addr, *value, ret);
+ "%s: Failed to get private value: %06x => %04x ret=%d\n",
+ __func__, addr, *value, ret);
return ret;
}
@@ -1060,13 +1060,13 @@ static int rt712_sdca_pcm_hw_params(struct snd_pcm_substream *substream,
retval = sdw_stream_add_slave(rt712->slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
if (params_channels(params) > 16) {
- dev_err(component->dev, "Unsupported channels %d\n",
- params_channels(params));
+ dev_err(component->dev, "%s: Unsupported channels %d\n",
+ __func__, params_channels(params));
return -EINVAL;
}
@@ -1085,8 +1085,8 @@ static int rt712_sdca_pcm_hw_params(struct snd_pcm_substream *substream,
sampling_rate = RT712_SDCA_RATE_192000HZ;
break;
default:
- dev_err(component->dev, "Rate %d is not supported\n",
- params_rate(params));
+ dev_err(component->dev, "%s: Rate %d is not supported\n",
+ __func__, params_rate(params));
return -EINVAL;
}
@@ -1106,7 +1106,7 @@ static int rt712_sdca_pcm_hw_params(struct snd_pcm_substream *substream,
sampling_rate);
break;
default:
- dev_err(component->dev, "Wrong DAI id\n");
+ dev_err(component->dev, "%s: Wrong DAI id\n", __func__);
return -EINVAL;
}
diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
index ab54a67a27eb..ee450126106f 100644
--- a/sound/soc/codecs/rt715-sdca-sdw.c
+++ b/sound/soc/codecs/rt715-sdca-sdw.c
@@ -237,7 +237,7 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
time = wait_for_completion_timeout(&slave->enumeration_complete,
msecs_to_jiffies(RT715_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "Enumeration not complete, timed out\n");
+ dev_err(&slave->dev, "%s: Enumeration not complete, timed out\n", __func__);
sdw_show_ping_status(slave->bus, true);
return -ETIMEDOUT;
diff --git a/sound/soc/codecs/rt715-sdca.c b/sound/soc/codecs/rt715-sdca.c
index 4533eedd7e18..3fb7b9adb61d 100644
--- a/sound/soc/codecs/rt715-sdca.c
+++ b/sound/soc/codecs/rt715-sdca.c
@@ -41,8 +41,8 @@ static int rt715_sdca_index_write(struct rt715_sdca_priv *rt715,
ret = regmap_write(regmap, addr, value);
if (ret < 0)
dev_err(&rt715->slave->dev,
- "Failed to set private value: %08x <= %04x %d\n",
- addr, value, ret);
+ "%s: Failed to set private value: %08x <= %04x %d\n",
+ __func__, addr, value, ret);
return ret;
}
@@ -59,8 +59,8 @@ static int rt715_sdca_index_read(struct rt715_sdca_priv *rt715,
ret = regmap_read(regmap, addr, value);
if (ret < 0)
dev_err(&rt715->slave->dev,
- "Failed to get private value: %06x => %04x ret=%d\n",
- addr, *value, ret);
+ "%s: Failed to get private value: %06x => %04x ret=%d\n",
+ __func__, addr, *value, ret);
return ret;
}
@@ -152,8 +152,8 @@ static int rt715_sdca_set_amp_gain_put(struct snd_kcontrol *kcontrol,
mc->shift);
ret = regmap_write(rt715->mbq_regmap, mc->reg + i, gain_val);
if (ret != 0) {
- dev_err(component->dev, "Failed to write 0x%x=0x%x\n",
- mc->reg + i, gain_val);
+ dev_err(component->dev, "%s: Failed to write 0x%x=0x%x\n",
+ __func__, mc->reg + i, gain_val);
return ret;
}
}
@@ -188,8 +188,8 @@ static int rt715_sdca_set_amp_gain_4ch_put(struct snd_kcontrol *kcontrol,
ret = regmap_write(rt715->mbq_regmap, reg_base + i,
gain_val);
if (ret != 0) {
- dev_err(component->dev, "Failed to write 0x%x=0x%x\n",
- reg_base + i, gain_val);
+ dev_err(component->dev, "%s: Failed to write 0x%x=0x%x\n",
+ __func__, reg_base + i, gain_val);
return ret;
}
}
@@ -224,8 +224,8 @@ static int rt715_sdca_set_amp_gain_8ch_put(struct snd_kcontrol *kcontrol,
reg = i < 7 ? reg_base + i : (reg_base - 1) | BIT(15);
ret = regmap_write(rt715->mbq_regmap, reg, gain_val);
if (ret != 0) {
- dev_err(component->dev, "Failed to write 0x%x=0x%x\n",
- reg, gain_val);
+ dev_err(component->dev, "%s: Failed to write 0x%x=0x%x\n",
+ __func__, reg, gain_val);
return ret;
}
}
@@ -246,8 +246,8 @@ static int rt715_sdca_set_amp_gain_get(struct snd_kcontrol *kcontrol,
for (i = 0; i < 2; i++) {
ret = regmap_read(rt715->mbq_regmap, mc->reg + i, &val);
if (ret < 0) {
- dev_err(component->dev, "Failed to read 0x%x, ret=%d\n",
- mc->reg + i, ret);
+ dev_err(component->dev, "%s: Failed to read 0x%x, ret=%d\n",
+ __func__, mc->reg + i, ret);
return ret;
}
ucontrol->value.integer.value[i] = rt715_sdca_get_gain(val, mc->shift);
@@ -271,8 +271,8 @@ static int rt715_sdca_set_amp_gain_4ch_get(struct snd_kcontrol *kcontrol,
for (i = 0; i < 4; i++) {
ret = regmap_read(rt715->mbq_regmap, reg_base + i, &val);
if (ret < 0) {
- dev_err(component->dev, "Failed to read 0x%x, ret=%d\n",
- reg_base + i, ret);
+ dev_err(component->dev, "%s: Failed to read 0x%x, ret=%d\n",
+ __func__, reg_base + i, ret);
return ret;
}
ucontrol->value.integer.value[i] = rt715_sdca_get_gain(val, gain_sft);
@@ -297,8 +297,8 @@ static int rt715_sdca_set_amp_gain_8ch_get(struct snd_kcontrol *kcontrol,
for (i = 0; i < 8; i += 2) {
ret = regmap_read(rt715->mbq_regmap, reg_base + i, &val_l);
if (ret < 0) {
- dev_err(component->dev, "Failed to read 0x%x, ret=%d\n",
- reg_base + i, ret);
+ dev_err(component->dev, "%s: Failed to read 0x%x, ret=%d\n",
+ __func__, reg_base + i, ret);
return ret;
}
ucontrol->value.integer.value[i] = (val_l >> gain_sft) / 10;
@@ -306,8 +306,8 @@ static int rt715_sdca_set_amp_gain_8ch_get(struct snd_kcontrol *kcontrol,
reg = (i == 6) ? (reg_base - 1) | BIT(15) : reg_base + 1 + i;
ret = regmap_read(rt715->mbq_regmap, reg, &val_r);
if (ret < 0) {
- dev_err(component->dev, "Failed to read 0x%x, ret=%d\n",
- reg, ret);
+ dev_err(component->dev, "%s: Failed to read 0x%x, ret=%d\n",
+ __func__, reg, ret);
return ret;
}
ucontrol->value.integer.value[i + 1] = (val_r >> gain_sft) / 10;
@@ -834,15 +834,15 @@ static int rt715_sdca_pcm_hw_params(struct snd_pcm_substream *substream,
0xaf00);
break;
default:
- dev_err(component->dev, "Invalid DAI id %d\n", dai->id);
+ dev_err(component->dev, "%s: Invalid DAI id %d\n", __func__, dai->id);
return -EINVAL;
}
retval = sdw_stream_add_slave(rt715->slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(component->dev, "Unable to configure port, retval:%d\n",
- retval);
+ dev_err(component->dev, "%s: Unable to configure port, retval:%d\n",
+ __func__, retval);
return retval;
}
@@ -893,8 +893,8 @@ static int rt715_sdca_pcm_hw_params(struct snd_pcm_substream *substream,
val = 0xf;
break;
default:
- dev_err(component->dev, "Unsupported sample rate %d\n",
- params_rate(params));
+ dev_err(component->dev, "%s: Unsupported sample rate %d\n",
+ __func__, params_rate(params));
return -EINVAL;
}
diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
index 21f37babd148..7e13868ff99f 100644
--- a/sound/soc/codecs/rt715-sdw.c
+++ b/sound/soc/codecs/rt715-sdw.c
@@ -482,7 +482,7 @@ static int rt715_bus_config(struct sdw_slave *slave,
ret = rt715_clock_config(&slave->dev);
if (ret < 0)
- dev_err(&slave->dev, "Invalid clk config");
+ dev_err(&slave->dev, "%s: Invalid clk config", __func__);
return 0;
}
@@ -554,7 +554,7 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT715_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ dev_err(&slave->dev, "%s: Initialization not complete, timed out\n", __func__);
sdw_show_ping_status(slave->bus, true);
return -ETIMEDOUT;
diff --git a/sound/soc/codecs/rt715.c b/sound/soc/codecs/rt715.c
index 9f732a5abd53..299c9b12377c 100644
--- a/sound/soc/codecs/rt715.c
+++ b/sound/soc/codecs/rt715.c
@@ -40,8 +40,8 @@ static int rt715_index_write(struct regmap *regmap, unsigned int reg,
ret = regmap_write(regmap, addr, value);
if (ret < 0) {
- pr_err("Failed to set private value: %08x <= %04x %d\n",
- addr, value, ret);
+ pr_err("%s: Failed to set private value: %08x <= %04x %d\n",
+ __func__, addr, value, ret);
}
return ret;
@@ -55,8 +55,8 @@ static int rt715_index_write_nid(struct regmap *regmap,
ret = regmap_write(regmap, addr, value);
if (ret < 0)
- pr_err("Failed to set private value: %06x <= %04x ret=%d\n",
- addr, value, ret);
+ pr_err("%s: Failed to set private value: %06x <= %04x ret=%d\n",
+ __func__, addr, value, ret);
return ret;
}
@@ -70,8 +70,8 @@ static int rt715_index_read_nid(struct regmap *regmap,
*value = 0;
ret = regmap_read(regmap, addr, value);
if (ret < 0)
- pr_err("Failed to get private value: %06x => %04x ret=%d\n",
- addr, *value, ret);
+ pr_err("%s: Failed to get private value: %06x => %04x ret=%d\n",
+ __func__, addr, *value, ret);
return ret;
}
@@ -862,14 +862,14 @@ static int rt715_pcm_hw_params(struct snd_pcm_substream *substream,
rt715_index_write(rt715->regmap, RT715_SDW_INPUT_SEL, 0xa000);
break;
default:
- dev_err(component->dev, "Invalid DAI id %d\n", dai->id);
+ dev_err(component->dev, "%s: Invalid DAI id %d\n", __func__, dai->id);
return -EINVAL;
}
retval = sdw_stream_add_slave(rt715->slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
@@ -883,8 +883,8 @@ static int rt715_pcm_hw_params(struct snd_pcm_substream *substream,
val |= 0x0 << 8;
break;
default:
- dev_err(component->dev, "Unsupported sample rate %d\n",
- params_rate(params));
+ dev_err(component->dev, "%s: Unsupported sample rate %d\n",
+ __func__, params_rate(params));
return -EINVAL;
}
@@ -892,8 +892,8 @@ static int rt715_pcm_hw_params(struct snd_pcm_substream *substream,
/* bit 3:0 Number of Channel */
val |= (params_channels(params) - 1);
} else {
- dev_err(component->dev, "Unsupported channels %d\n",
- params_channels(params));
+ dev_err(component->dev, "%s: Unsupported channels %d\n",
+ __func__, params_channels(params));
return -EINVAL;
}
diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c
index eb76f4c675b6..65d584c1886e 100644
--- a/sound/soc/codecs/rt722-sdca-sdw.c
+++ b/sound/soc/codecs/rt722-sdca-sdw.c
@@ -467,13 +467,13 @@ static int __maybe_unused rt722_sdca_dev_resume(struct device *dev)
return 0;
if (!slave->unattach_request) {
+ mutex_lock(&rt722->disable_irq_lock);
if (rt722->disable_irq == true) {
- mutex_lock(&rt722->disable_irq_lock);
sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_6);
sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
rt722->disable_irq = false;
- mutex_unlock(&rt722->disable_irq_lock);
}
+ mutex_unlock(&rt722->disable_irq_lock);
goto regmap_sync;
}
diff --git a/sound/soc/codecs/rt722-sdca.c b/sound/soc/codecs/rt722-sdca.c
index 0e1c65a20392..e0ea3a23f7cc 100644
--- a/sound/soc/codecs/rt722-sdca.c
+++ b/sound/soc/codecs/rt722-sdca.c
@@ -35,8 +35,8 @@ int rt722_sdca_index_write(struct rt722_sdca_priv *rt722,
ret = regmap_write(regmap, addr, value);
if (ret < 0)
dev_err(&rt722->slave->dev,
- "Failed to set private value: %06x <= %04x ret=%d\n",
- addr, value, ret);
+ "%s: Failed to set private value: %06x <= %04x ret=%d\n",
+ __func__, addr, value, ret);
return ret;
}
@@ -51,8 +51,8 @@ int rt722_sdca_index_read(struct rt722_sdca_priv *rt722,
ret = regmap_read(regmap, addr, value);
if (ret < 0)
dev_err(&rt722->slave->dev,
- "Failed to get private value: %06x => %04x ret=%d\n",
- addr, *value, ret);
+ "%s: Failed to get private value: %06x => %04x ret=%d\n",
+ __func__, addr, *value, ret);
return ret;
}
@@ -663,7 +663,8 @@ static int rt722_sdca_dmic_set_gain_put(struct snd_kcontrol *kcontrol,
for (i = 0; i < p->count; i++) {
err = regmap_write(rt722->mbq_regmap, p->reg_base + i, gain_val[i]);
if (err < 0)
- dev_err(&rt722->slave->dev, "%#08x can't be set\n", p->reg_base + i);
+ dev_err(&rt722->slave->dev, "%s: %#08x can't be set\n",
+ __func__, p->reg_base + i);
}
return changed;
@@ -1211,13 +1212,13 @@ static int rt722_sdca_pcm_hw_params(struct snd_pcm_substream *substream,
retval = sdw_stream_add_slave(rt722->slave, &stream_config,
&port_config, 1, sdw_stream);
if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
+ dev_err(dai->dev, "%s: Unable to configure port\n", __func__);
return retval;
}
if (params_channels(params) > 16) {
- dev_err(component->dev, "Unsupported channels %d\n",
- params_channels(params));
+ dev_err(component->dev, "%s: Unsupported channels %d\n",
+ __func__, params_channels(params));
return -EINVAL;
}
@@ -1236,8 +1237,8 @@ static int rt722_sdca_pcm_hw_params(struct snd_pcm_substream *substream,
sampling_rate = RT722_SDCA_RATE_192000HZ;
break;
default:
- dev_err(component->dev, "Rate %d is not supported\n",
- params_rate(params));
+ dev_err(component->dev, "%s: Rate %d is not supported\n",
+ __func__, params_rate(params));
return -EINVAL;
}
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index e451c009f2d9..7d5c096e06cd 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -683,11 +683,12 @@ static void wm_adsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl)
int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
unsigned int alg, void *buf, size_t len)
{
- struct cs_dsp_coeff_ctl *cs_ctl = cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg);
+ struct cs_dsp_coeff_ctl *cs_ctl;
struct wm_coeff_ctl *ctl;
int ret;
mutex_lock(&dsp->cs_dsp.pwr_lock);
+ cs_ctl = cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg);
ret = cs_dsp_coeff_write_ctrl(cs_ctl, 0, buf, len);
mutex_unlock(&dsp->cs_dsp.pwr_lock);
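The wm_adsp_write_ctl() change moves the cs_dsp_get_ctl() lookup inside pwr_lock, closing the window in which the control could be freed between lookup and use. A self-contained model of the lookup-under-lock pattern (all names are illustrative, not the cs_dsp API):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t pwr_lock = PTHREAD_MUTEX_INITIALIZER;
static int the_ctl = 42; /* stands in for a cs_dsp control object */

static int *lookup_ctl(const char *name)
{
	(void)name;
	return &the_ctl; /* only valid while pwr_lock is held */
}

static int write_ctl(int *ctl, int val)
{
	if (!ctl)
		return -1;
	*ctl = val;
	return 0;
}

static int safe_write(const char *name, int val)
{
	int *ctl;
	int ret;

	pthread_mutex_lock(&pwr_lock);
	ctl = lookup_ctl(name); /* lookup now happens inside the lock */
	ret = write_ctl(ctl, val);
	pthread_mutex_unlock(&pwr_lock);
	return ret;
}

int main(void)
{
	return safe_write("example", 1);
}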
diff --git a/sound/soc/intel/avs/boards/da7219.c b/sound/soc/intel/avs/boards/da7219.c
index c018f84fe025..fc072dc58968 100644
--- a/sound/soc/intel/avs/boards/da7219.c
+++ b/sound/soc/intel/avs/boards/da7219.c
@@ -296,5 +296,6 @@ static struct platform_driver avs_da7219_driver = {
module_platform_driver(avs_da7219_driver);
+MODULE_DESCRIPTION("Intel da7219 machine driver");
MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_LICENSE("GPL");
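The remaining avs board patches all add the same piece of metadata; MODULE_DESCRIPTION() is what modinfo reports, and newer kernels' modpost can warn when it is missing. For reference, a minimal module skeleton carrying the common tags (an illustrative skeleton, not one of the drivers being patched):

#include <linux/module.h>

static int __init example_init(void)
{
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);

MODULE_DESCRIPTION("Example machine driver");
MODULE_AUTHOR("Jane Doe <jane@example.com>");
MODULE_LICENSE("GPL");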
diff --git a/sound/soc/intel/avs/boards/dmic.c b/sound/soc/intel/avs/boards/dmic.c
index ba2bc7f689eb..d9e5e85f5233 100644
--- a/sound/soc/intel/avs/boards/dmic.c
+++ b/sound/soc/intel/avs/boards/dmic.c
@@ -96,4 +96,5 @@ static struct platform_driver avs_dmic_driver = {
module_platform_driver(avs_dmic_driver);
+MODULE_DESCRIPTION("Intel DMIC machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/es8336.c b/sound/soc/intel/avs/boards/es8336.c
index 1090082e7d5b..5c90a6007577 100644
--- a/sound/soc/intel/avs/boards/es8336.c
+++ b/sound/soc/intel/avs/boards/es8336.c
@@ -326,4 +326,5 @@ static struct platform_driver avs_es8336_driver = {
module_platform_driver(avs_es8336_driver);
+MODULE_DESCRIPTION("Intel es8336 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/i2s_test.c b/sound/soc/intel/avs/boards/i2s_test.c
index 28f254eb0d03..027373d6a16d 100644
--- a/sound/soc/intel/avs/boards/i2s_test.c
+++ b/sound/soc/intel/avs/boards/i2s_test.c
@@ -204,4 +204,5 @@ static struct platform_driver avs_i2s_test_driver = {
module_platform_driver(avs_i2s_test_driver);
+MODULE_DESCRIPTION("Intel i2s test machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/max98357a.c b/sound/soc/intel/avs/boards/max98357a.c
index a83b95f25129..1ff85e4d8e16 100644
--- a/sound/soc/intel/avs/boards/max98357a.c
+++ b/sound/soc/intel/avs/boards/max98357a.c
@@ -154,4 +154,5 @@ static struct platform_driver avs_max98357a_driver = {
module_platform_driver(avs_max98357a_driver)
+MODULE_DESCRIPTION("Intel max98357a machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/max98373.c b/sound/soc/intel/avs/boards/max98373.c
index 3b980a025e6f..8d31586b73ea 100644
--- a/sound/soc/intel/avs/boards/max98373.c
+++ b/sound/soc/intel/avs/boards/max98373.c
@@ -211,4 +211,5 @@ static struct platform_driver avs_max98373_driver = {
module_platform_driver(avs_max98373_driver)
+MODULE_DESCRIPTION("Intel max98373 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/max98927.c b/sound/soc/intel/avs/boards/max98927.c
index 86dd2b228df3..572ec58073d0 100644
--- a/sound/soc/intel/avs/boards/max98927.c
+++ b/sound/soc/intel/avs/boards/max98927.c
@@ -208,4 +208,5 @@ static struct platform_driver avs_max98927_driver = {
module_platform_driver(avs_max98927_driver)
+MODULE_DESCRIPTION("Intel max98927 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/nau8825.c b/sound/soc/intel/avs/boards/nau8825.c
index 1c1e2083f474..55db75efae41 100644
--- a/sound/soc/intel/avs/boards/nau8825.c
+++ b/sound/soc/intel/avs/boards/nau8825.c
@@ -313,4 +313,5 @@ static struct platform_driver avs_nau8825_driver = {
module_platform_driver(avs_nau8825_driver)
+MODULE_DESCRIPTION("Intel nau8825 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/probe.c b/sound/soc/intel/avs/boards/probe.c
index a9469b5ecb40..8be6887bbc6e 100644
--- a/sound/soc/intel/avs/boards/probe.c
+++ b/sound/soc/intel/avs/boards/probe.c
@@ -69,4 +69,5 @@ static struct platform_driver avs_probe_mb_driver = {
module_platform_driver(avs_probe_mb_driver);
+MODULE_DESCRIPTION("Intel probe machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/rt274.c b/sound/soc/intel/avs/boards/rt274.c
index bfcb8845fd15..1cf524216087 100644
--- a/sound/soc/intel/avs/boards/rt274.c
+++ b/sound/soc/intel/avs/boards/rt274.c
@@ -276,4 +276,5 @@ static struct platform_driver avs_rt274_driver = {
module_platform_driver(avs_rt274_driver);
+MODULE_DESCRIPTION("Intel rt274 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/rt286.c b/sound/soc/intel/avs/boards/rt286.c
index 28d7d86b1cc9..4740bba10570 100644
--- a/sound/soc/intel/avs/boards/rt286.c
+++ b/sound/soc/intel/avs/boards/rt286.c
@@ -247,4 +247,5 @@ static struct platform_driver avs_rt286_driver = {
module_platform_driver(avs_rt286_driver);
+MODULE_DESCRIPTION("Intel rt286 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/rt298.c b/sound/soc/intel/avs/boards/rt298.c
index 80f490b9e118..6e409e29f697 100644
--- a/sound/soc/intel/avs/boards/rt298.c
+++ b/sound/soc/intel/avs/boards/rt298.c
@@ -266,4 +266,5 @@ static struct platform_driver avs_rt298_driver = {
module_platform_driver(avs_rt298_driver);
+MODULE_DESCRIPTION("Intel rt298 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/rt5514.c b/sound/soc/intel/avs/boards/rt5514.c
index 60105f453ae2..097ae5f73241 100644
--- a/sound/soc/intel/avs/boards/rt5514.c
+++ b/sound/soc/intel/avs/boards/rt5514.c
@@ -192,4 +192,5 @@ static struct platform_driver avs_rt5514_driver = {
module_platform_driver(avs_rt5514_driver);
+MODULE_DESCRIPTION("Intel rt5514 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/rt5663.c b/sound/soc/intel/avs/boards/rt5663.c
index b4762c2a7bf2..1880c315cc4d 100644
--- a/sound/soc/intel/avs/boards/rt5663.c
+++ b/sound/soc/intel/avs/boards/rt5663.c
@@ -265,4 +265,5 @@ static struct platform_driver avs_rt5663_driver = {
module_platform_driver(avs_rt5663_driver);
+MODULE_DESCRIPTION("Intel rt5663 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/rt5682.c b/sound/soc/intel/avs/boards/rt5682.c
index 243f979fda98..594a971ded9e 100644
--- a/sound/soc/intel/avs/boards/rt5682.c
+++ b/sound/soc/intel/avs/boards/rt5682.c
@@ -341,5 +341,6 @@ static struct platform_driver avs_rt5682_driver = {
module_platform_driver(avs_rt5682_driver)
+MODULE_DESCRIPTION("Intel rt5682 machine driver");
MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c
index 4a0e136835ff..d6f7f046c24e 100644
--- a/sound/soc/intel/avs/boards/ssm4567.c
+++ b/sound/soc/intel/avs/boards/ssm4567.c
@@ -200,4 +200,5 @@ static struct platform_driver avs_ssm4567_driver = {
module_platform_driver(avs_ssm4567_driver)
+MODULE_DESCRIPTION("Intel ssm4567 machine driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 2d25748ca706..b27e89ff6a16 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -263,7 +263,7 @@ int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
int max = mc->max;
int min = mc->min;
int sign_bit = mc->sign_bit;
- unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int mask = (1ULL << fls(max)) - 1;
unsigned int invert = mc->invert;
int val;
int ret;
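The snd_soc_get_volsw() fix matters when max occupies bit 31: fls(max) is then 32, and shifting a plain int by 32 is undefined behaviour. Promoting the constant to 1ULL keeps the shift defined, and the truncation back to unsigned int produces the intended all-ones mask. A userspace demonstration (fls() is modelled with a GCC builtin; in the kernel it comes from the bitops headers):

#include <stdio.h>

static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int max = 0x80000000u; /* a maximum that uses bit 31 */
	unsigned int mask = (1ULL << fls(max)) - 1;

	/* prints "fls(max)=32 mask=0xffffffff" */
	printf("fls(max)=%d mask=%#x\n", fls(max), mask);
	return 0;
}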
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
index be7dc1e02284..c12c7f820529 100644
--- a/sound/soc/sof/amd/acp.c
+++ b/sound/soc/sof/amd/acp.c
@@ -704,6 +704,10 @@ int amd_sof_acp_probe(struct snd_sof_dev *sdev)
goto unregister_dev;
}
+ ret = acp_init(sdev);
+ if (ret < 0)
+ goto free_smn_dev;
+
sdev->ipc_irq = pci->irq;
ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
IRQF_SHARED, "AudioDSP", sdev);
@@ -713,10 +717,6 @@ int amd_sof_acp_probe(struct snd_sof_dev *sdev)
goto free_smn_dev;
}
- ret = acp_init(sdev);
- if (ret < 0)
- goto free_ipc_irq;
-
/* scan SoundWire capabilities exposed by DSDT */
ret = acp_sof_scan_sdw_devices(sdev, chip->sdw_acpi_dev_addr);
if (ret < 0) {
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 9b00ede2a486..cc84d4c81be9 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -339,8 +339,7 @@ static int sof_init_environment(struct snd_sof_dev *sdev)
ret = snd_sof_probe(sdev);
if (ret < 0) {
dev_err(sdev->dev, "failed to probe DSP %d\n", ret);
- sof_ops_free(sdev);
- return ret;
+ goto err_sof_probe;
}
/* check machine info */
@@ -358,15 +357,18 @@ static int sof_init_environment(struct snd_sof_dev *sdev)
ret = validate_sof_ops(sdev);
if (ret < 0) {
snd_sof_remove(sdev);
+ snd_sof_remove_late(sdev);
return ret;
}
}
+ return 0;
+
err_machine_check:
- if (ret) {
- snd_sof_remove(sdev);
- sof_ops_free(sdev);
- }
+ snd_sof_remove(sdev);
+err_sof_probe:
+ snd_sof_remove_late(sdev);
+ sof_ops_free(sdev);
return ret;
}
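The sof_init_environment() rework replaces duplicated cleanup with the conventional goto ladder: each failure jumps to a label that unwinds only what has already been set up, in reverse order, and the success path returns before the labels. A compilable sketch of the shape (the function names are placeholders for snd_sof_probe(), snd_sof_remove() and friends):

#include <stdio.h>

static int probe_dsp(void) { return 0; }
static int check_machine(void) { return -19; } /* force the error path */
static void remove_dsp(void) { puts("remove"); }
static void remove_late(void) { puts("remove_late"); }
static void ops_free(void) { puts("ops_free"); }

static int init_environment(void)
{
	int ret;

	ret = probe_dsp();
	if (ret < 0)
		goto err_probe;

	ret = check_machine();
	if (ret < 0)
		goto err_machine_check;

	return 0;

err_machine_check:
	remove_dsp();  /* undo what probe_dsp() set up */
err_probe:
	remove_late(); /* undo pre-probe setup */
	ops_free();
	return ret;
}

int main(void)
{
	return init_environment() ? 1 : 0;
}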
diff --git a/sound/soc/sof/intel/hda-common-ops.c b/sound/soc/sof/intel/hda-common-ops.c
index 2b385cddc385..d71bb66b9991 100644
--- a/sound/soc/sof/intel/hda-common-ops.c
+++ b/sound/soc/sof/intel/hda-common-ops.c
@@ -57,6 +57,9 @@ struct snd_sof_dsp_ops sof_hda_common_ops = {
.pcm_pointer = hda_dsp_pcm_pointer,
.pcm_ack = hda_dsp_pcm_ack,
+ .get_dai_frame_counter = hda_dsp_get_stream_llp,
+ .get_host_byte_counter = hda_dsp_get_stream_ldp,
+
/* firmware loading */
.load_firmware = snd_sof_load_firmware_raw,
diff --git a/sound/soc/sof/intel/hda-dai-ops.c b/sound/soc/sof/intel/hda-dai-ops.c
index c50ca9e72d37..b073720b4cf4 100644
--- a/sound/soc/sof/intel/hda-dai-ops.c
+++ b/sound/soc/sof/intel/hda-dai-ops.c
@@ -7,6 +7,7 @@
#include <sound/pcm_params.h>
#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
#include <sound/hda-mlink.h>
#include <sound/sof/ipc4/header.h>
#include <uapi/sound/sof/header.h>
@@ -362,6 +363,16 @@ static int hda_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
snd_hdac_ext_stream_clear(hext_stream);
+
+ /*
+ * Save the LLP registers in case the stream is
+ * restarting due PAUSE_RELEASE, or START without a pcm
+ * close/open since in this case the LLP register is not reset
+ * to 0 and the delay calculation will return with invalid
+ * results.
+ */
+ hext_stream->pplcllpl = readl(hext_stream->pplc_addr + AZX_REG_PPLCLLPL);
+ hext_stream->pplcllpu = readl(hext_stream->pplc_addr + AZX_REG_PPLCLLPU);
break;
default:
dev_err(sdev->dev, "unknown trigger command %d\n", cmd);
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index 31ffa1a8f2ac..ef5c915db8ff 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -681,17 +681,27 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
struct hdac_bus *bus = sof_to_bus(sdev);
+ bool imr_lost = false;
int ret, j;
/*
- * The memory used for IMR boot loses its content in deeper than S3 state
- * We must not try IMR boot on next power up (as it will fail).
- *
+ * The memory used for IMR boot loses its content in states deeper
+ * than S3 on CAVS platforms.
+ * On ACE platforms the IMR content is lost already at S3 due to the
+ * system architecture; these platforms are tailored for s2idle use.
+ * We must not try IMR boot on the next power up in these cases as it
+ * will fail.
+ */
+ if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
+ (chip->hw_ip_version >= SOF_INTEL_ACE_1_0 &&
+ sdev->system_suspend_target == SOF_SUSPEND_S3))
+ imr_lost = true;
+
+ /*
* In case of firmware crash or boot failure set the skip_imr_boot to true
* as well in order to try to re-load the firmware to do a 'cold' boot.
*/
- if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
- sdev->fw_state == SOF_FW_CRASHED ||
+ if (imr_lost || sdev->fw_state == SOF_FW_CRASHED ||
sdev->fw_state == SOF_FW_BOOT_FAILED)
hda->skip_imr_boot = true;
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
index 18f07364d219..d7b446f3f973 100644
--- a/sound/soc/sof/intel/hda-pcm.c
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -259,8 +259,37 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
snd_pcm_hw_constraint_mask64(substream->runtime, SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S32);
+ /*
+ * dsp_max_burst_size_in_ms is the maximum length of a host DMA burst
+ * in the ALSA buffer, in milliseconds.
+ *
+ * On playback start the DMA will transfer dsp_max_burst_size_in_ms
+ * worth of data in one initial burst to fill up the host DMA buffer.
+ * Subsequent DMA bursts are shorter and their length can vary.
+ * To make sure that userspace allocates a large enough ALSA buffer we
+ * need to place a constraint on the buffer time.
+ *
+ * On capture the DMA will transfer 1ms chunks.
+ *
+ * An exact dsp_max_burst_size_in_ms constraint would be racy, so set
+ * the minimum to 2x dsp_max_burst_size_in_ms.
+ */
+ if (spcm->stream[direction].dsp_max_burst_size_in_ms)
+ snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+ spcm->stream[direction].dsp_max_burst_size_in_ms * USEC_PER_MSEC * 2,
+ UINT_MAX);
+
/* binding pcm substream to hda stream */
substream->runtime->private_data = &dsp_stream->hstream;
+
+ /*
+ * Reset the llp cache values (they are used for LLP compensation in
+ * case the counter is not reset)
+ */
+ dsp_stream->pplcllpl = 0;
+ dsp_stream->pplcllpu = 0;
+
return 0;
}
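A worked example of the new constraint arithmetic: with a hypothetical maximum DMA burst of 4 ms, the minimum BUFFER_TIME becomes 4 * 1000 µs * 2 = 8000 µs, i.e. 384 frames at 48 kHz:

#include <stdio.h>

int main(void)
{
	unsigned int burst_ms = 4; /* assumed topology value, for illustration */
	unsigned int min_buffer_us = burst_ms * 1000 * 2;
	unsigned int rate = 48000;
	unsigned long min_frames = (unsigned long)min_buffer_us * rate / 1000000;

	/* prints "min buffer: 8000 us (384 frames at 48000 Hz)" */
	printf("min buffer: %u us (%lu frames at %u Hz)\n",
	       min_buffer_us, min_frames, rate);
	return 0;
}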
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
index b387b1a69d7e..0c189d3b19c1 100644
--- a/sound/soc/sof/intel/hda-stream.c
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -1063,3 +1063,73 @@ snd_pcm_uframes_t hda_dsp_stream_get_position(struct hdac_stream *hstream,
return pos;
}
+
+#define merge_u64(u32_u, u32_l) (((u64)(u32_u) << 32) | (u32_l))
+
+/**
+ * hda_dsp_get_stream_llp - Retrieve the LLP (Linear Link Position) of the stream
+ * @sdev: SOF device
+ * @component: ASoC component
+ * @substream: PCM substream
+ *
+ * Returns the raw Linear Link Position value
+ */
+u64 hda_dsp_get_stream_llp(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+ u32 llp_l, llp_u;
+
+ /*
+ * The pplc_addr has been calculated during probe in
+ * hda_dsp_stream_init():
+ * pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ * SOF_HDA_PPLC_BASE +
+ * SOF_HDA_PPLC_MULTI * total_stream +
+ * SOF_HDA_PPLC_INTERVAL * stream_index
+ *
+ * Use this pre-calculated address to avoid repeated re-calculation.
+ */
+ llp_l = readl(hext_stream->pplc_addr + AZX_REG_PPLCLLPL);
+ llp_u = readl(hext_stream->pplc_addr + AZX_REG_PPLCLLPU);
+
+ /* Compensate the LLP counter with the saved offset */
+ if (hext_stream->pplcllpl || hext_stream->pplcllpu)
+ return merge_u64(llp_u, llp_l) -
+ merge_u64(hext_stream->pplcllpu, hext_stream->pplcllpl);
+
+ return merge_u64(llp_u, llp_l);
+}
+
+/**
+ * hda_dsp_get_stream_ldp - Retrieve the LDP (Linear DMA Position) of the stream
+ * @sdev: SOF device
+ * @component: ASoC component
+ * @substream: PCM substream
+ *
+ * Returns the raw Linear DMA Position value
+ */
+u64 hda_dsp_get_stream_ldp(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+ u32 ldp_l, ldp_u;
+
+ /*
+ * The pphc_addr has been calculated during probe in
+ * hda_dsp_stream_init():
+ * pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ * SOF_HDA_PPHC_BASE +
+ * SOF_HDA_PPHC_INTERVAL * stream_index
+ *
+ * Use this pre-calculated address to avoid repeated re-calculation.
+ */
+ ldp_l = readl(hext_stream->pphc_addr + AZX_REG_PPHCLDPL);
+ ldp_u = readl(hext_stream->pphc_addr + AZX_REG_PPHCLDPU);
+
+ return ((u64)ldp_u << 32) | ldp_l;
+}
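Together with the hda-dai-ops.c hunk that snapshots PPLCLLPL/PPLCLLPU at stop time, the compensation above makes the reported link position relative to the restart even when the hardware counter was never reset. A userspace model of the arithmetic (register reads are simulated with plain variables; the values are made up):

#include <stdint.h>
#include <stdio.h>

#define merge_u64(u32_u, u32_l) (((uint64_t)(u32_u) << 32) | (u32_l))

int main(void)
{
	uint32_t saved_u = 0x0, saved_l = 0x1000; /* snapshot taken at stop */
	uint32_t llp_u = 0x0, llp_l = 0x1400;     /* raw reading after restart */
	uint64_t pos = merge_u64(llp_u, llp_l);

	/* subtract the saved offset, as in hda_dsp_get_stream_llp() */
	if (saved_u || saved_l)
		pos -= merge_u64(saved_u, saved_l);

	printf("compensated LLP: %#llx\n", (unsigned long long)pos); /* 0x400 */
	return 0;
}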
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index b36eb7c78913..81a1d4606d3c 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -662,6 +662,12 @@ bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev);
snd_pcm_uframes_t hda_dsp_stream_get_position(struct hdac_stream *hstream,
int direction, bool can_sleep);
+u64 hda_dsp_get_stream_llp(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+u64 hda_dsp_get_stream_ldp(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
struct hdac_ext_stream *
hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction, u32 flags);
diff --git a/sound/soc/sof/intel/lnl.c b/sound/soc/sof/intel/lnl.c
index 7ae017a00184..aeb4350cce6b 100644
--- a/sound/soc/sof/intel/lnl.c
+++ b/sound/soc/sof/intel/lnl.c
@@ -29,15 +29,17 @@ static const struct snd_sof_debugfs_map lnl_dsp_debugfs[] = {
};
/* this helps allow the DSP to set up DMIC/SSP */
-static int hdac_bus_offload_dmic_ssp(struct hdac_bus *bus)
+static int hdac_bus_offload_dmic_ssp(struct hdac_bus *bus, bool enable)
{
int ret;
- ret = hdac_bus_eml_enable_offload(bus, true, AZX_REG_ML_LEPTR_ID_INTEL_SSP, true);
+ ret = hdac_bus_eml_enable_offload(bus, true,
+ AZX_REG_ML_LEPTR_ID_INTEL_SSP, enable);
if (ret < 0)
return ret;
- ret = hdac_bus_eml_enable_offload(bus, true, AZX_REG_ML_LEPTR_ID_INTEL_DMIC, true);
+ ret = hdac_bus_eml_enable_offload(bus, true,
+ AZX_REG_ML_LEPTR_ID_INTEL_DMIC, enable);
if (ret < 0)
return ret;
@@ -52,7 +54,19 @@ static int lnl_hda_dsp_probe(struct snd_sof_dev *sdev)
if (ret < 0)
return ret;
- return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev));
+ return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev), true);
+}
+
+static void lnl_hda_dsp_remove(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ ret = hdac_bus_offload_dmic_ssp(sof_to_bus(sdev), false);
+ if (ret < 0)
+ dev_warn(sdev->dev,
+ "Failed to disable offload for DMIC/SSP: %d\n", ret);
+
+ hda_dsp_remove(sdev);
}
static int lnl_hda_dsp_resume(struct snd_sof_dev *sdev)
@@ -63,7 +77,7 @@ static int lnl_hda_dsp_resume(struct snd_sof_dev *sdev)
if (ret < 0)
return ret;
- return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev));
+ return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev), true);
}
static int lnl_hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
@@ -74,7 +88,7 @@ static int lnl_hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
if (ret < 0)
return ret;
- return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev));
+ return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev), true);
}
static int lnl_dsp_post_fw_run(struct snd_sof_dev *sdev)
@@ -97,9 +111,11 @@ int sof_lnl_ops_init(struct snd_sof_dev *sdev)
/* common defaults */
memcpy(&sof_lnl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
- /* probe */
- if (!sdev->dspless_mode_selected)
+ /* probe/remove */
+ if (!sdev->dspless_mode_selected) {
sof_lnl_ops.probe = lnl_hda_dsp_probe;
+ sof_lnl_ops.remove = lnl_hda_dsp_remove;
+ }
/* shutdown */
sof_lnl_ops.shutdown = hda_dsp_shutdown;
@@ -134,8 +150,6 @@ int sof_lnl_ops_init(struct snd_sof_dev *sdev)
sof_lnl_ops.runtime_resume = lnl_hda_dsp_runtime_resume;
}
- sof_lnl_ops.get_stream_position = mtl_dsp_get_stream_hda_link_position;
-
/* dsp core get/put */
sof_lnl_ops.core_get = mtl_dsp_core_get;
sof_lnl_ops.core_put = mtl_dsp_core_put;
diff --git a/sound/soc/sof/intel/mtl.c b/sound/soc/sof/intel/mtl.c
index df05dc77b8d5..060c34988e90 100644
--- a/sound/soc/sof/intel/mtl.c
+++ b/sound/soc/sof/intel/mtl.c
@@ -626,18 +626,6 @@ static int mtl_dsp_disable_interrupts(struct snd_sof_dev *sdev)
return mtl_enable_interrupts(sdev, false);
}
-u64 mtl_dsp_get_stream_hda_link_position(struct snd_sof_dev *sdev,
- struct snd_soc_component *component,
- struct snd_pcm_substream *substream)
-{
- struct hdac_stream *hstream = substream->runtime->private_data;
- u32 llp_l, llp_u;
-
- llp_l = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, MTL_PPLCLLPL(hstream->index));
- llp_u = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, MTL_PPLCLLPU(hstream->index));
- return ((u64)llp_u << 32) | llp_l;
-}
-
int mtl_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
@@ -707,8 +695,6 @@ int sof_mtl_ops_init(struct snd_sof_dev *sdev)
sof_mtl_ops.core_get = mtl_dsp_core_get;
sof_mtl_ops.core_put = mtl_dsp_core_put;
- sof_mtl_ops.get_stream_position = mtl_dsp_get_stream_hda_link_position;
-
sdev->private = kzalloc(sizeof(struct sof_ipc4_fw_data), GFP_KERNEL);
if (!sdev->private)
return -ENOMEM;
diff --git a/sound/soc/sof/intel/mtl.h b/sound/soc/sof/intel/mtl.h
index cc5a1f46fd09..ea8c1b83f712 100644
--- a/sound/soc/sof/intel/mtl.h
+++ b/sound/soc/sof/intel/mtl.h
@@ -6,12 +6,6 @@
* Copyright(c) 2020-2022 Intel Corporation. All rights reserved.
*/
-/* HDA Registers */
-#define MTL_PPLCLLPL_BASE 0x948
-#define MTL_PPLCLLPU_STRIDE 0x10
-#define MTL_PPLCLLPL(x) (MTL_PPLCLLPL_BASE + (x) * MTL_PPLCLLPU_STRIDE)
-#define MTL_PPLCLLPU(x) (MTL_PPLCLLPL_BASE + 0x4 + (x) * MTL_PPLCLLPU_STRIDE)
-
/* DSP Registers */
#define MTL_HFDSSCS 0x1000
#define MTL_HFDSSCS_SPA_MASK BIT(16)
@@ -103,9 +97,5 @@ int mtl_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id);
void mtl_ipc_dump(struct snd_sof_dev *sdev);
-u64 mtl_dsp_get_stream_hda_link_position(struct snd_sof_dev *sdev,
- struct snd_soc_component *component,
- struct snd_pcm_substream *substream);
-
int mtl_dsp_core_get(struct snd_sof_dev *sdev, int core);
int mtl_dsp_core_put(struct snd_sof_dev *sdev, int core);
diff --git a/sound/soc/sof/ipc4-mtrace.c b/sound/soc/sof/ipc4-mtrace.c
index 9f1e33ee8826..0e04bea9432d 100644
--- a/sound/soc/sof/ipc4-mtrace.c
+++ b/sound/soc/sof/ipc4-mtrace.c
@@ -4,6 +4,7 @@
#include <linux/debugfs.h>
#include <linux/sched/signal.h>
+#include <linux/sched/clock.h>
#include <sound/sof/ipc4/header.h>
#include "sof-priv.h"
#include "ipc4-priv.h"
@@ -412,7 +413,6 @@ static int ipc4_mtrace_enable(struct snd_sof_dev *sdev)
const struct sof_ipc_ops *iops = sdev->ipc->ops;
struct sof_ipc4_msg msg;
u64 system_time;
- ktime_t kt;
int ret;
if (priv->mtrace_state != SOF_MTRACE_DISABLED)
@@ -424,9 +424,12 @@ static int ipc4_mtrace_enable(struct snd_sof_dev *sdev)
msg.primary |= SOF_IPC4_MOD_INSTANCE(SOF_IPC4_MOD_INIT_BASEFW_INSTANCE_ID);
msg.extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_FW_PARAM_SYSTEM_TIME);
- /* The system time is in usec, UTC, epoch is 1601-01-01 00:00:00 */
- kt = ktime_add_us(ktime_get_real(), FW_EPOCH_DELTA * USEC_PER_SEC);
- system_time = ktime_to_us(kt);
+ /*
+ * local_clock() is used to align with dmesg, so both kernel and firmware logs have
+ * the same base and a minor delta due to the IPC. System time is in us format but
+ * local_clock() returns the time in ns, so convert to us.
+ */
+ system_time = div64_u64(local_clock(), NSEC_PER_USEC);
msg.data_size = sizeof(system_time);
msg.data_ptr = &system_time;
ret = iops->set_get_data(sdev, &msg, msg.data_size, true);
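The switch from wall-clock time to local_clock() trades the 1601-epoch UTC base for the same timebase dmesg uses, so kernel and firmware log lines can be correlated directly. A userspace analogue of the ns-to-µs conversion using CLOCK_MONOTONIC (an approximation of the scheduler clock, for illustration only):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	unsigned long long ns;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	ns = (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	printf("system_time: %llu us\n", ns / 1000ull); /* ns -> us */
	return 0;
}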
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
index 0f332c8cdbe6..e915f9f87a6c 100644
--- a/sound/soc/sof/ipc4-pcm.c
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -15,6 +15,28 @@
#include "ipc4-topology.h"
#include "ipc4-fw-reg.h"
+/**
+ * struct sof_ipc4_timestamp_info - IPC4 timestamp info
+ * @host_copier: the host copier of the pcm stream
+ * @dai_copier: the dai copier of the pcm stream
+ * @stream_start_offset: reported by fw in memory window (converted to frames)
+ * @stream_end_offset: reported by fw in memory window (converted to frames)
+ * @llp_offset: llp offset in memory window
+ * @boundary: wrap boundary to be used for the LLP frame counter
+ * @delay: Calculated and stored in the pointer callback. The stored value is
+ * returned in the delay callback.
+ */
+struct sof_ipc4_timestamp_info {
+ struct sof_ipc4_copier *host_copier;
+ struct sof_ipc4_copier *dai_copier;
+ u64 stream_start_offset;
+ u64 stream_end_offset;
+ u32 llp_offset;
+
+ u64 boundary;
+ snd_pcm_sframes_t delay;
+};
+
static int sof_ipc4_set_multi_pipeline_state(struct snd_sof_dev *sdev, u32 state,
struct ipc4_pipeline_set_state_data *trigger_list)
{
@@ -423,8 +445,19 @@ static int sof_ipc4_trigger_pipelines(struct snd_soc_component *component,
}
/* return if this is the final state */
- if (state == SOF_IPC4_PIPE_PAUSED)
+ if (state == SOF_IPC4_PIPE_PAUSED) {
+ struct sof_ipc4_timestamp_info *time_info;
+
+ /*
+ * Invalidate the stream_start_offset to make sure that it is
+ * going to be updated if the stream resumes
+ */
+ time_info = spcm->stream[substream->stream].private;
+ if (time_info)
+ time_info->stream_start_offset = SOF_IPC4_INVALID_STREAM_POSITION;
+
goto free;
+ }
skip_pause_transition:
/* else set the RUNNING/RESET state in the DSP */
ret = sof_ipc4_set_multi_pipeline_state(sdev, state, trigger_list);
@@ -464,14 +497,12 @@ static int sof_ipc4_pcm_trigger(struct snd_soc_component *component,
/* determine the pipeline state */
switch (cmd) {
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- state = SOF_IPC4_PIPE_PAUSED;
- break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_START:
state = SOF_IPC4_PIPE_RUNNING;
break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
state = SOF_IPC4_PIPE_PAUSED;
@@ -703,6 +734,10 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm
if (abi_version < SOF_IPC4_FW_REGS_ABI_VER)
support_info = false;
+ /* For delay reporting the get_host_byte_counter callback is needed */
+ if (!sof_ops(sdev) || !sof_ops(sdev)->get_host_byte_counter)
+ support_info = false;
+
for_each_pcm_streams(stream) {
pipeline_list = &spcm->stream[stream].pipeline_list;
@@ -835,7 +870,6 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
struct sof_ipc4_copier *host_copier = time_info->host_copier;
struct sof_ipc4_copier *dai_copier = time_info->dai_copier;
struct sof_ipc4_pipeline_registers ppl_reg;
- u64 stream_start_position;
u32 dai_sample_size;
u32 ch, node_index;
u32 offset;
@@ -852,38 +886,51 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
if (ppl_reg.stream_start_offset == SOF_IPC4_INVALID_STREAM_POSITION)
return -EINVAL;
- stream_start_position = ppl_reg.stream_start_offset;
ch = dai_copier->data.out_format.fmt_cfg;
ch = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(ch);
dai_sample_size = (dai_copier->data.out_format.bit_depth >> 3) * ch;
- /* convert offset to sample count */
- do_div(stream_start_position, dai_sample_size);
- time_info->stream_start_offset = stream_start_position;
+
+ /* convert offsets to frame count */
+ time_info->stream_start_offset = ppl_reg.stream_start_offset;
+ do_div(time_info->stream_start_offset, dai_sample_size);
+ time_info->stream_end_offset = ppl_reg.stream_end_offset;
+ do_div(time_info->stream_end_offset, dai_sample_size);
+
+ /*
+ * Calculate the wrap boundary to be used for delay calculation.
+ * The host counter is in bytes; it will wrap earlier than the
+ * frame-based link counter.
+ */
+ time_info->boundary = div64_u64(~((u64)0),
+ frames_to_bytes(substream->runtime, 1));
+ /* Initialize the delay value to 0 (no delay) */
+ time_info->delay = 0;
return 0;
}
-static snd_pcm_sframes_t sof_ipc4_pcm_delay(struct snd_soc_component *component,
- struct snd_pcm_substream *substream)
+static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t *pointer)
{
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct sof_ipc4_timestamp_info *time_info;
struct sof_ipc4_llp_reading_slot llp;
- snd_pcm_uframes_t head_ptr, tail_ptr;
+ snd_pcm_uframes_t head_cnt, tail_cnt;
struct snd_sof_pcm_stream *stream;
+ u64 dai_cnt, host_cnt, host_ptr;
struct snd_sof_pcm *spcm;
- u64 tmp_ptr;
int ret;
spcm = snd_sof_find_spcm_dai(component, rtd);
if (!spcm)
- return 0;
+ return -EOPNOTSUPP;
stream = &spcm->stream[substream->stream];
time_info = stream->private;
if (!time_info)
- return 0;
+ return -EOPNOTSUPP;
/*
* stream_start_offset is updated to memory window by FW based on
@@ -893,45 +940,116 @@ static snd_pcm_sframes_t sof_ipc4_pcm_delay(struct snd_soc_component *component,
if (time_info->stream_start_offset == SOF_IPC4_INVALID_STREAM_POSITION) {
ret = sof_ipc4_get_stream_start_offset(sdev, substream, stream, time_info);
if (ret < 0)
- return 0;
+ return -EOPNOTSUPP;
}
+ /* For delay calculation we need the host counter */
+ host_cnt = snd_sof_pcm_get_host_byte_counter(sdev, component, substream);
+ host_ptr = host_cnt;
+
+ /* convert the host_cnt to frames */
+ host_cnt = div64_u64(host_cnt, frames_to_bytes(substream->runtime, 1));
+
/*
- * HDaudio links don't support the LLP counter reported by firmware
- * the link position is read directly from hardware registers.
+ * If the LLP counter is not reported by firmware in the SRAM window
+ * then read the dai (link) counter via host accessible means if
+ * available.
*/
if (!time_info->llp_offset) {
- tmp_ptr = snd_sof_pcm_get_stream_position(sdev, component, substream);
- if (!tmp_ptr)
- return 0;
+ dai_cnt = snd_sof_pcm_get_dai_frame_counter(sdev, component, substream);
+ if (!dai_cnt)
+ return -EOPNOTSUPP;
} else {
sof_mailbox_read(sdev, time_info->llp_offset, &llp, sizeof(llp));
- tmp_ptr = ((u64)llp.reading.llp_u << 32) | llp.reading.llp_l;
+ dai_cnt = ((u64)llp.reading.llp_u << 32) | llp.reading.llp_l;
}
+ dai_cnt += time_info->stream_end_offset;
- /* In two cases dai dma position is not accurate
+ /* In two cases dai dma counter is not accurate
* (1) dai pipeline is started before host pipeline
- * (2) multiple streams mixed into one. Each stream has the same dai dma position
+ * (2) multiple streams mixed into one. Each stream has the same dai dma
+ * counter
*
- * Firmware calculates correct stream_start_offset for all cases including above two.
- * Driver subtracts stream_start_offset from dai dma position to get accurate one
+ * Firmware calculates correct stream_start_offset for all cases
+ * including above two.
+ * Driver subtracts stream_start_offset from dai dma counter to get
+ * an accurate one
*/
- tmp_ptr -= time_info->stream_start_offset;
- /* Calculate the delay taking into account that both pointer can wrap */
- div64_u64_rem(tmp_ptr, substream->runtime->boundary, &tmp_ptr);
+ /*
+ * On stream start the dai counter might not yet have reached the
+ * stream_start_offset value, which means that no frames have left the
+ * DSP yet from the audio stream (on playback, capture streams have
+ * offset of 0 as we start capturing right away).
+ * In this case we need to adjust the distance between the counters by
+ * increasing the host counter by (offset - dai_counter).
+ * Otherwise the dai_counter needs to be adjusted to reflect the number
+ * of valid frames passed on the DAI side.
+ *
+ * The delay is the difference between the counters on the two
+ * sides of the DSP.
+ */
+ if (dai_cnt < time_info->stream_start_offset) {
+ host_cnt += time_info->stream_start_offset - dai_cnt;
+ dai_cnt = 0;
+ } else {
+ dai_cnt -= time_info->stream_start_offset;
+ }
+
+ /* Wrap the dai counter at the boundary where the host counter wraps */
+ div64_u64_rem(dai_cnt, time_info->boundary, &dai_cnt);
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- head_ptr = substream->runtime->status->hw_ptr;
- tail_ptr = tmp_ptr;
+ head_cnt = host_cnt;
+ tail_cnt = dai_cnt;
} else {
- head_ptr = tmp_ptr;
- tail_ptr = substream->runtime->status->hw_ptr;
+ head_cnt = dai_cnt;
+ tail_cnt = host_cnt;
+ }
+
+ if (head_cnt < tail_cnt) {
+ time_info->delay = time_info->boundary - tail_cnt + head_cnt;
+ goto out;
}
- if (head_ptr < tail_ptr)
- return substream->runtime->boundary - tail_ptr + head_ptr;
+ time_info->delay = head_cnt - tail_cnt;
+
+out:
+ /*
+ * Convert the host byte counter to a PCM pointer, which wraps within
+ * the buffer and is expressed in frames
+ */
+ div64_u64_rem(host_ptr, snd_pcm_lib_buffer_bytes(substream), &host_ptr);
+ *pointer = bytes_to_frames(substream->runtime, host_ptr);
+
+ return 0;
+}
+
+static snd_pcm_sframes_t sof_ipc4_pcm_delay(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct sof_ipc4_timestamp_info *time_info;
+ struct snd_sof_pcm_stream *stream;
+ struct snd_sof_pcm *spcm;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return 0;
+
+ stream = &spcm->stream[substream->stream];
+ time_info = stream->private;
+ /*
+ * Report the stored delay value calculated in the pointer callback.
+ * In the unlikely event that the calculation was skipped/aborted, the
+ * default delay of 0 is returned.
+ */
+ if (time_info)
+ return time_info->delay;
+
+ /* No delay information available, report 0 as delay */
+ return 0;
- return head_ptr - tail_ptr;
}
const struct sof_ipc_pcm_ops ipc4_pcm_ops = {
@@ -941,6 +1059,7 @@ const struct sof_ipc_pcm_ops ipc4_pcm_ops = {
.dai_link_fixup = sof_ipc4_pcm_dai_link_fixup,
.pcm_setup = sof_ipc4_pcm_setup,
.pcm_free = sof_ipc4_pcm_free,
+ .pointer = sof_ipc4_pcm_pointer,
.delay = sof_ipc4_pcm_delay,
.ipc_first_on_start = true,
.platform_stop_during_hw_free = true,
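The start-offset adjustment and boundary wrap in sof_ipc4_pcm_pointer() above are easier to see with the arithmetic pulled out on its own. A minimal standalone sketch of the same math (function name and the % shorthand are illustrative, not the driver's code; the driver itself uses div64_u64_rem() for the 64-bit remainder):

	/*
	 * Sketch of the delay arithmetic in sof_ipc4_pcm_pointer().
	 * 'boundary' is the wrap point shared by both counters; kernel
	 * u64/bool types assumed.
	 */
	static u64 example_delay(u64 host_cnt, u64 dai_cnt, u64 start_offset,
				 u64 boundary, bool playback)
	{
		u64 head, tail;

		if (dai_cnt < start_offset) {
			/* no frames have left the DSP for this stream yet */
			host_cnt += start_offset - dai_cnt;
			dai_cnt = 0;
		} else {
			dai_cnt -= start_offset;
		}

		dai_cnt %= boundary;	/* wrap at the host counter's boundary */

		head = playback ? host_cnt : dai_cnt;
		tail = playback ? dai_cnt : host_cnt;

		/* head has wrapped past the boundary while tail has not */
		if (head < tail)
			return boundary - tail + head;
		return head - tail;
	}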
diff --git a/sound/soc/sof/ipc4-priv.h b/sound/soc/sof/ipc4-priv.h
index f3b908b093f9..afed618a15f0 100644
--- a/sound/soc/sof/ipc4-priv.h
+++ b/sound/soc/sof/ipc4-priv.h
@@ -92,20 +92,6 @@ struct sof_ipc4_fw_data {
struct mutex pipeline_state_mutex; /* protect pipeline triggers, ref counts and states */
};
-/**
- * struct sof_ipc4_timestamp_info - IPC4 timestamp info
- * @host_copier: the host copier of the pcm stream
- * @dai_copier: the dai copier of the pcm stream
- * @stream_start_offset: reported by fw in memory window
- * @llp_offset: llp offset in memory window
- */
-struct sof_ipc4_timestamp_info {
- struct sof_ipc4_copier *host_copier;
- struct sof_ipc4_copier *dai_copier;
- u64 stream_start_offset;
- u32 llp_offset;
-};
-
extern const struct sof_ipc_fw_loader_ops ipc4_loader_ops;
extern const struct sof_ipc_tplg_ops ipc4_tplg_ops;
extern const struct sof_ipc_tplg_control_ops tplg_ipc4_control_ops;
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index da4a83afb87a..5cca05842126 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -412,8 +412,9 @@ static int sof_ipc4_widget_setup_pcm(struct snd_sof_widget *swidget)
struct sof_ipc4_available_audio_format *available_fmt;
struct snd_soc_component *scomp = swidget->scomp;
struct sof_ipc4_copier *ipc4_copier;
+ struct snd_sof_pcm *spcm;
int node_type = 0;
- int ret;
+ int ret, dir;
ipc4_copier = kzalloc(sizeof(*ipc4_copier), GFP_KERNEL);
if (!ipc4_copier)
@@ -447,6 +448,25 @@ static int sof_ipc4_widget_setup_pcm(struct snd_sof_widget *swidget)
}
dev_dbg(scomp->dev, "host copier '%s' node_type %u\n", swidget->widget->name, node_type);
+ spcm = snd_sof_find_spcm_comp(scomp, swidget->comp_id, &dir);
+ if (!spcm)
+ goto skip_gtw_cfg;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
+ struct snd_sof_pcm_stream *sps = &spcm->stream[dir];
+
+ sof_update_ipc_object(scomp, &sps->dsp_max_burst_size_in_ms,
+ SOF_COPIER_DEEP_BUFFER_TOKENS,
+ swidget->tuples,
+ swidget->num_tuples, sizeof(u32), 1);
+ /* Set default DMA buffer size if it is not specified in topology */
+ if (!sps->dsp_max_burst_size_in_ms)
+ sps->dsp_max_burst_size_in_ms = SOF_IPC4_MIN_DMA_BUFFER_SIZE;
+ } else {
+ /* Capture data is copied from DSP to host in 1ms bursts */
+ spcm->stream[dir].dsp_max_burst_size_in_ms = 1;
+ }
+
skip_gtw_cfg:
ipc4_copier->gtw_attr = kzalloc(sizeof(*ipc4_copier->gtw_attr), GFP_KERNEL);
if (!ipc4_copier->gtw_attr) {
@@ -1356,6 +1376,7 @@ static int snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_s
int sample_rate, channel_count;
int bit_depth, ret;
u32 nhlt_type;
+ int dev_type = 0;
/* convert to NHLT type */
switch (linktype) {
@@ -1371,18 +1392,30 @@ static int snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_s
&bit_depth);
if (ret < 0)
return ret;
+
+ /*
+ * We need to know the type of the external device attached to a SSP
+ * port to retrieve the blob from NHLT. However, device type is not
+ * specified in topology.
+ * Query the type for the port and then pass that information back
+ * to the blob lookup function.
+ */
+ dev_type = intel_nhlt_ssp_device_type(sdev->dev, ipc4_data->nhlt,
+ dai_index);
+ if (dev_type < 0)
+ return dev_type;
break;
default:
return 0;
}
- dev_dbg(sdev->dev, "dai index %d nhlt type %d direction %d\n",
- dai_index, nhlt_type, dir);
+ dev_dbg(sdev->dev, "dai index %d nhlt type %d direction %d dev type %d\n",
+ dai_index, nhlt_type, dir, dev_type);
/* find NHLT blob with matching params */
cfg = intel_nhlt_get_endpoint_blob(sdev->dev, ipc4_data->nhlt, dai_index, nhlt_type,
bit_depth, bit_depth, channel_count, sample_rate,
- dir, 0);
+ dir, dev_type);
if (!cfg) {
dev_err(sdev->dev,
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
index 6cf21e829e07..3cd748e13460 100644
--- a/sound/soc/sof/ops.h
+++ b/sound/soc/sof/ops.h
@@ -523,12 +523,26 @@ static inline int snd_sof_pcm_platform_ack(struct snd_sof_dev *sdev,
return 0;
}
-static inline u64 snd_sof_pcm_get_stream_position(struct snd_sof_dev *sdev,
- struct snd_soc_component *component,
- struct snd_pcm_substream *substream)
+static inline u64
+snd_sof_pcm_get_dai_frame_counter(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- if (sof_ops(sdev) && sof_ops(sdev)->get_stream_position)
- return sof_ops(sdev)->get_stream_position(sdev, component, substream);
+ if (sof_ops(sdev) && sof_ops(sdev)->get_dai_frame_counter)
+ return sof_ops(sdev)->get_dai_frame_counter(sdev, component,
+ substream);
+
+ return 0;
+}
+
+static inline u64
+snd_sof_pcm_get_host_byte_counter(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->get_host_byte_counter)
+ return sof_ops(sdev)->get_host_byte_counter(sdev, component,
+ substream);
return 0;
}
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
index 33d576b17647..f03cee94bce6 100644
--- a/sound/soc/sof/pcm.c
+++ b/sound/soc/sof/pcm.c
@@ -388,13 +388,21 @@ static snd_pcm_uframes_t sof_pcm_pointer(struct snd_soc_component *component,
{
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct sof_ipc_pcm_ops *pcm_ops = sof_ipc_get_ops(sdev, pcm);
struct snd_sof_pcm *spcm;
snd_pcm_uframes_t host, dai;
+ int ret = -EOPNOTSUPP;
/* nothing to do for BE */
if (rtd->dai_link->no_pcm)
return 0;
+ if (pcm_ops && pcm_ops->pointer)
+ ret = pcm_ops->pointer(component, substream, &host);
+
+ if (ret != -EOPNOTSUPP)
+ return ret ? ret : host;
+
/* use dsp ops pointer callback directly if set */
if (sof_ops(sdev)->pcm_pointer)
return sof_ops(sdev)->pcm_pointer(sdev, substream);
diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
index 9ea2ac5adac7..86bbb531e142 100644
--- a/sound/soc/sof/sof-audio.h
+++ b/sound/soc/sof/sof-audio.h
@@ -103,7 +103,10 @@ struct snd_sof_dai_config_data {
* additional memory in the SOF PCM stream structure
* @pcm_free: Function pointer for PCM free that can be used for freeing any
* additional memory in the SOF PCM stream structure
- * @delay: Function pointer for pcm delay calculation
+ * @pointer: Function pointer for pcm pointer
+ * Note: the @pointer callback may return -EOPNOTSUPP, which should be
+ * handled the same way as when the callback is not provided
+ * @delay: Function pointer for pcm delay reporting
* @reset_hw_params_during_stop: Flag indicating whether the hw_params should be reset during the
* STOP pcm trigger
* @ipc_first_on_start: Send IPC before invoking platform trigger during
@@ -124,6 +127,9 @@ struct sof_ipc_pcm_ops {
int (*dai_link_fixup)(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params);
int (*pcm_setup)(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm);
void (*pcm_free)(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm);
+ int (*pointer)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t *pointer);
snd_pcm_sframes_t (*delay)(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
bool reset_hw_params_during_stop;
@@ -322,6 +328,7 @@ struct snd_sof_pcm_stream {
struct work_struct period_elapsed_work;
struct snd_soc_dapm_widget_list *list; /* list of connected DAPM widgets */
bool d0i3_compatible; /* DSP can be in D0I3 when this pcm is opened */
+ unsigned int dsp_max_burst_size_in_ms; /* The maximum size of the host DMA burst in ms */
/*
* flag to indicate that the DSP pipelines should be kept
* active or not while suspending the stream
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
index d453a4ce3b21..d3c436f82604 100644
--- a/sound/soc/sof/sof-priv.h
+++ b/sound/soc/sof/sof-priv.h
@@ -262,13 +262,25 @@ struct snd_sof_dsp_ops {
int (*pcm_ack)(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream); /* optional */
/*
- * optional callback to retrieve the link DMA position for the substream
- * when the position is not reported in the shared SRAM windows but
- * instead from a host-accessible hardware counter.
+ * optional callback to retrieve the number of frames left/arrived from/to
+ * the DSP on the DAI side (link/codec/DMIC/etc).
+ *
+ * The callback is used when the firmware does not provide this information
+ * via the shared SRAM window and it can be retrieved by the host.
*/
- u64 (*get_stream_position)(struct snd_sof_dev *sdev,
- struct snd_soc_component *component,
- struct snd_pcm_substream *substream); /* optional */
+ u64 (*get_dai_frame_counter)(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream); /* optional */
+
+ /*
+ * Optional callback to retrieve the number of bytes left/arrived from/to
+ * the DSP on the host side (bytes between host ALSA buffer and DSP).
+ *
+ * The callback is needed for ALSA delay reporting.
+ */
+ u64 (*get_host_byte_counter)(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream); /* optional */
/* host read DSP stream data */
int (*ipc_msg_data)(struct snd_sof_dev *sdev,
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index b67617b68e50..f4437015d43a 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -202,7 +202,7 @@ int line6_send_raw_message_async(struct usb_line6 *line6, const char *buffer,
struct urb *urb;
/* create message: */
- msg = kmalloc(sizeof(struct message), GFP_ATOMIC);
+ msg = kzalloc(sizeof(struct message), GFP_ATOMIC);
if (msg == NULL)
return -ENOMEM;
@@ -688,7 +688,7 @@ static int line6_init_cap_control(struct usb_line6 *line6)
int ret;
/* initialize USB buffers: */
- line6->buffer_listen = kmalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
+ line6->buffer_listen = kzalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
if (!line6->buffer_listen)
return -ENOMEM;
@@ -697,7 +697,7 @@ static int line6_init_cap_control(struct usb_line6 *line6)
return -ENOMEM;
if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
- line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
+ line6->buffer_message = kzalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
if (!line6->buffer_message)
return -ENOMEM;
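The three kmalloc()-to-kzalloc() conversions above are defensive: these buffers are filled incrementally from URB completions, so any bytes the device never supplies would otherwise hold stale heap contents. A hedged illustration of the failure mode (struct and helper invented for this example, not the driver's types):

	struct example_buf {
		int len;	/* how much of data[] is valid */
		char data[64];	/* filled piecewise as URBs complete */
	};

	static struct example_buf *example_alloc(void)
	{
		/*
		 * With kmalloc() the tail of data[] past 'len' keeps whatever
		 * the heap chunk held before; kzalloc() zeroes the whole
		 * allocation, so a partially filled buffer reads back as
		 * empty rather than as leaked kernel memory.
		 */
		return kzalloc(sizeof(struct example_buf), GFP_KERNEL);
	}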
diff --git a/tools/Makefile b/tools/Makefile
index 37e9f6804832..276f5d0d53a4 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -11,7 +11,6 @@ help:
@echo ''
@echo ' acpi - ACPI tools'
@echo ' bpf - misc BPF tools'
- @echo ' cgroup - cgroup tools'
@echo ' counter - counter tools'
@echo ' cpupower - a tool for all things x86 CPU power'
@echo ' debugging - tools for debugging'
@@ -69,7 +68,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
+counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
$(call descend,$@)
bpf/%: FORCE
@@ -116,7 +115,7 @@ freefall: FORCE
kvm_stat: FORCE
$(call descend,kvm/$@)
-all: acpi cgroup counter cpupower gpio hv firewire \
+all: acpi counter cpupower gpio hv firewire \
perf selftests bootconfig spi turbostat usb \
virtio mm bpf x86_energy_perf_policy \
tmon freefall iio objtool kvm_stat wmi \
@@ -128,7 +127,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
+counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
$(call descend,$(@:_install=),install)
selftests_install:
@@ -155,7 +154,7 @@ freefall_install:
kvm_stat_install:
$(call descend,kvm/$(@:_install=),install)
-install: acpi_install cgroup_install counter_install cpupower_install gpio_install \
+install: acpi_install counter_install cpupower_install gpio_install \
hv_install firewire_install iio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install mm_install bpf_install x86_energy_perf_policy_install \
@@ -169,7 +168,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
+counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
$(call descend,$(@:_clean=),clean)
libapi_clean:
@@ -209,7 +208,7 @@ freefall_clean:
build_clean:
$(call descend,build,clean)
-clean: acpi_clean cgroup_clean counter_clean cpupower_clean hv_clean firewire_clean \
+clean: acpi_clean counter_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
mm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean \
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index 7c7493cb571f..52f076afeb96 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -61,6 +61,7 @@
#define ARM_CPU_IMP_HISI 0x48
#define ARM_CPU_IMP_APPLE 0x61
#define ARM_CPU_IMP_AMPERE 0xC0
+#define ARM_CPU_IMP_MICROSOFT 0x6D
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -135,6 +136,8 @@
#define AMPERE_CPU_PART_AMPERE1 0xAC3
+#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
+
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -193,6 +196,7 @@
#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
+#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
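For reference, MIDR_CPU_MODEL() packs the implementer ID into MIDR_EL1 bits [31:24] and the part number into bits [15:4], with the architecture nibble fixed at 0xf as in the kernel's cputype.h. Under those layout assumptions, the new entry resolves to a concrete value:

	/* MIDR_MICROSOFT_AZURE_COBALT_100 == MIDR_CPU_MODEL(0x6D, 0xD49) */
	static const unsigned int example_midr =
		(0x6Du << 24) | (0xfu << 16) | (0xD49u << 4);	/* 0x6D0FD490 */
	/* variant [23:20] and revision [3:0] bits stay 0 */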
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 89d2fc872d9f..964df31da975 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -37,9 +37,7 @@
#include <asm/ptrace.h>
#include <asm/sve_context.h>
-#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
-#define __KVM_HAVE_READONLY_MEM
#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -76,11 +74,11 @@ struct kvm_regs {
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
-#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
- KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_TYPE_MASK __GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
+ KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
-#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
- KVM_ARM_DEVICE_ID_SHIFT)
+#define KVM_ARM_DEVICE_ID_MASK __GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
+ KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
@@ -162,6 +160,11 @@ struct kvm_sync_regs {
__u64 device_irq_level;
};
+/* Bits for run->s.regs.device_irq_level */
+#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
+#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
+#define KVM_ARM_DEV_PMU (1 << 2)
+
/*
* PMU filter structure. Describe a range of events with a particular
* action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 9f18fa090f1f..1691297a766a 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -28,7 +28,6 @@
#define __KVM_HAVE_PPC_SMT
#define __KVM_HAVE_IRQCHIP
#define __KVM_HAVE_IRQ_LINE
-#define __KVM_HAVE_GUEST_DEBUG
/* Not always available, but if it is, this is the correct offset. */
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -733,4 +732,48 @@ struct kvm_ppc_xive_eq {
#define KVM_XIVE_TIMA_PAGE_OFFSET 0
#define KVM_XIVE_ESB_PAGE_OFFSET 4
+/* for KVM_PPC_GET_PVINFO */
+
+#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
+
+struct kvm_ppc_pvinfo {
+ /* out */
+ __u32 flags;
+ __u32 hcall[4];
+ __u8 pad[108];
+};
+
+/* for KVM_PPC_GET_SMMU_INFO */
+#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
+
+struct kvm_ppc_one_page_size {
+ __u32 page_shift; /* Page shift (or 0) */
+ __u32 pte_enc; /* Encoding in the HPTE (>>12) */
+};
+
+struct kvm_ppc_one_seg_page_size {
+ __u32 page_shift; /* Base page shift of segment (or 0) */
+ __u32 slb_enc; /* SLB encoding for BookS */
+ struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
+#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
+#define KVM_PPC_1T_SEGMENTS 0x00000002
+#define KVM_PPC_NO_HASH 0x00000004
+
+struct kvm_ppc_smmu_info {
+ __u64 flags;
+ __u32 slb_size;
+ __u16 data_keys; /* # storage keys supported for data */
+ __u16 instr_keys; /* # storage keys supported for instructions */
+ struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
+/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */
+struct kvm_ppc_resize_hpt {
+ __u64 flags;
+ __u32 shift;
+ __u32 pad;
+};
+
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index abe926d43cbe..05eaf6db3ad4 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -12,7 +12,320 @@
#include <linux/types.h>
#define __KVM_S390
-#define __KVM_HAVE_GUEST_DEBUG
+
+struct kvm_s390_skeys {
+ __u64 start_gfn;
+ __u64 count;
+ __u64 skeydata_addr;
+ __u32 flags;
+ __u32 reserved[9];
+};
+
+#define KVM_S390_CMMA_PEEK (1 << 0)
+
+/**
+ * kvm_s390_cmma_log - Used for CMMA migration.
+ *
+ * Used both for input and output.
+ *
+ * @start_gfn: Guest page number to start from.
+ * @count: Size of the result buffer.
+ * @flags: Control operation mode via KVM_S390_CMMA_* flags
+ * @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty
+ * pages are still remaining.
+ * @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set
+ * in the PGSTE.
+ * @values: Pointer to the values buffer.
+ *
+ * Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls.
+ */
+struct kvm_s390_cmma_log {
+ __u64 start_gfn;
+ __u32 count;
+ __u32 flags;
+ union {
+ __u64 remaining;
+ __u64 mask;
+ };
+ __u64 values;
+};
+
+#define KVM_S390_RESET_POR 1
+#define KVM_S390_RESET_CLEAR 2
+#define KVM_S390_RESET_SUBSYSTEM 4
+#define KVM_S390_RESET_CPU_INIT 8
+#define KVM_S390_RESET_IPL 16
+
+/* for KVM_S390_MEM_OP */
+struct kvm_s390_mem_op {
+ /* in */
+ __u64 gaddr; /* the guest address */
+ __u64 flags; /* flags */
+ __u32 size; /* amount of bytes */
+ __u32 op; /* type of operation */
+ __u64 buf; /* buffer in userspace */
+ union {
+ struct {
+ __u8 ar; /* the access register number */
+ __u8 key; /* access key, ignored if flag unset */
+ __u8 pad1[6]; /* ignored */
+ __u64 old_addr; /* ignored if cmpxchg flag unset */
+ };
+ __u32 sida_offset; /* offset into the sida */
+ __u8 reserved[32]; /* ignored */
+ };
+};
+/* types for kvm_s390_mem_op->op */
+#define KVM_S390_MEMOP_LOGICAL_READ 0
+#define KVM_S390_MEMOP_LOGICAL_WRITE 1
+#define KVM_S390_MEMOP_SIDA_READ 2
+#define KVM_S390_MEMOP_SIDA_WRITE 3
+#define KVM_S390_MEMOP_ABSOLUTE_READ 4
+#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
+#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6
+
+/* flags for kvm_s390_mem_op->flags */
+#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
+#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
+#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
+
+/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
+#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0)
+#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1)
+
+struct kvm_s390_psw {
+ __u64 mask;
+ __u64 addr;
+};
+
+/* valid values for type in kvm_s390_interrupt */
+#define KVM_S390_SIGP_STOP 0xfffe0000u
+#define KVM_S390_PROGRAM_INT 0xfffe0001u
+#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
+#define KVM_S390_RESTART 0xfffe0003u
+#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u
+#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u
+#define KVM_S390_MCHK 0xfffe1000u
+#define KVM_S390_INT_CLOCK_COMP 0xffff1004u
+#define KVM_S390_INT_CPU_TIMER 0xffff1005u
+#define KVM_S390_INT_VIRTIO 0xffff2603u
+#define KVM_S390_INT_SERVICE 0xffff2401u
+#define KVM_S390_INT_EMERGENCY 0xffff1201u
+#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
+/* Anything below 0xfffe0000u is taken by INT_IO */
+#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \
+ (((schid)) | \
+ ((ssid) << 16) | \
+ ((cssid) << 18) | \
+ ((ai) << 26))
+#define KVM_S390_INT_IO_MIN 0x00000000u
+#define KVM_S390_INT_IO_MAX 0xfffdffffu
+#define KVM_S390_INT_IO_AI_MASK 0x04000000u
+
+
+struct kvm_s390_interrupt {
+ __u32 type;
+ __u32 parm;
+ __u64 parm64;
+};
+
+struct kvm_s390_io_info {
+ __u16 subchannel_id;
+ __u16 subchannel_nr;
+ __u32 io_int_parm;
+ __u32 io_int_word;
+};
+
+struct kvm_s390_ext_info {
+ __u32 ext_params;
+ __u32 pad;
+ __u64 ext_params2;
+};
+
+struct kvm_s390_pgm_info {
+ __u64 trans_exc_code;
+ __u64 mon_code;
+ __u64 per_address;
+ __u32 data_exc_code;
+ __u16 code;
+ __u16 mon_class_nr;
+ __u8 per_code;
+ __u8 per_atmid;
+ __u8 exc_access_id;
+ __u8 per_access_id;
+ __u8 op_access_id;
+#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01
+#define KVM_S390_PGM_FLAGS_ILC_0 0x02
+#define KVM_S390_PGM_FLAGS_ILC_1 0x04
+#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06
+#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08
+ __u8 flags;
+ __u8 pad[2];
+};
+
+struct kvm_s390_prefix_info {
+ __u32 address;
+};
+
+struct kvm_s390_extcall_info {
+ __u16 code;
+};
+
+struct kvm_s390_emerg_info {
+ __u16 code;
+};
+
+#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
+struct kvm_s390_stop_info {
+ __u32 flags;
+};
+
+struct kvm_s390_mchk_info {
+ __u64 cr14;
+ __u64 mcic;
+ __u64 failing_storage_address;
+ __u32 ext_damage_code;
+ __u32 pad;
+ __u8 fixed_logout[16];
+};
+
+struct kvm_s390_irq {
+ __u64 type;
+ union {
+ struct kvm_s390_io_info io;
+ struct kvm_s390_ext_info ext;
+ struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
+ struct kvm_s390_extcall_info extcall;
+ struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
+ struct kvm_s390_mchk_info mchk;
+ char reserved[64];
+ } u;
+};
+
+struct kvm_s390_irq_state {
+ __u64 buf;
+ __u32 flags; /* will stay unused for compatibility reasons */
+ __u32 len;
+ __u32 reserved[4]; /* will stay unused for compatibility reasons */
+};
+
+struct kvm_s390_ucas_mapping {
+ __u64 user_addr;
+ __u64 vcpu_addr;
+ __u64 length;
+};
+
+struct kvm_s390_pv_sec_parm {
+ __u64 origin;
+ __u64 length;
+};
+
+struct kvm_s390_pv_unp {
+ __u64 addr;
+ __u64 size;
+ __u64 tweak;
+};
+
+enum pv_cmd_dmp_id {
+ KVM_PV_DUMP_INIT,
+ KVM_PV_DUMP_CONFIG_STOR_STATE,
+ KVM_PV_DUMP_COMPLETE,
+ KVM_PV_DUMP_CPU,
+};
+
+struct kvm_s390_pv_dmp {
+ __u64 subcmd;
+ __u64 buff_addr;
+ __u64 buff_len;
+ __u64 gaddr; /* For dump storage state */
+ __u64 reserved[4];
+};
+
+enum pv_cmd_info_id {
+ KVM_PV_INFO_VM,
+ KVM_PV_INFO_DUMP,
+};
+
+struct kvm_s390_pv_info_dump {
+ __u64 dump_cpu_buffer_len;
+ __u64 dump_config_mem_buffer_per_1m;
+ __u64 dump_config_finalize_len;
+};
+
+struct kvm_s390_pv_info_vm {
+ __u64 inst_calls_list[4];
+ __u64 max_cpus;
+ __u64 max_guests;
+ __u64 max_guest_addr;
+ __u64 feature_indication;
+};
+
+struct kvm_s390_pv_info_header {
+ __u32 id;
+ __u32 len_max;
+ __u32 len_written;
+ __u32 reserved;
+};
+
+struct kvm_s390_pv_info {
+ struct kvm_s390_pv_info_header header;
+ union {
+ struct kvm_s390_pv_info_dump dump;
+ struct kvm_s390_pv_info_vm vm;
+ };
+};
+
+enum pv_cmd_id {
+ KVM_PV_ENABLE,
+ KVM_PV_DISABLE,
+ KVM_PV_SET_SEC_PARMS,
+ KVM_PV_UNPACK,
+ KVM_PV_VERIFY,
+ KVM_PV_PREP_RESET,
+ KVM_PV_UNSHARE_ALL,
+ KVM_PV_INFO,
+ KVM_PV_DUMP,
+ KVM_PV_ASYNC_CLEANUP_PREPARE,
+ KVM_PV_ASYNC_CLEANUP_PERFORM,
+};
+
+struct kvm_pv_cmd {
+ __u32 cmd; /* Command to be executed */
+ __u16 rc; /* Ultravisor return code */
+ __u16 rrc; /* Ultravisor return reason code */
+ __u64 data; /* Data or address */
+ __u32 flags; /* flags for future extensions. Must be 0 for now */
+ __u32 reserved[3];
+};
+
+struct kvm_s390_zpci_op {
+ /* in */
+ __u32 fh; /* target device */
+ __u8 op; /* operation to perform */
+ __u8 pad[3];
+ union {
+ /* for KVM_S390_ZPCIOP_REG_AEN */
+ struct {
+ __u64 ibv; /* Guest addr of interrupt bit vector */
+ __u64 sb; /* Guest addr of summary bit */
+ __u32 flags;
+ __u32 noi; /* Number of interrupts */
+ __u8 isc; /* Guest interrupt subclass */
+ __u8 sbo; /* Offset of guest summary bit vector */
+ __u16 pad;
+ } reg_aen;
+ __u64 reserved[8];
+ } u;
+};
+
+/* types for kvm_s390_zpci_op->op */
+#define KVM_S390_ZPCIOP_REG_AEN 0
+#define KVM_S390_ZPCIOP_DEREG_AEN 1
+
+/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
+#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
/* Device control API: s390-specific devices */
#define KVM_DEV_FLIC_GET_ALL_IRQS 1
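The KVM_S390_INT_IO() macro above packs the adapter-interrupt indicator, channel-subsystem ID, subchannel-set ID and subchannel number into a single interrupt type word below 0xfffe0000u. A worked example of the packing (field values invented):

	/* ai = 1, cssid = 0, ssid = 1, schid = 0x0002 */
	__u32 type = KVM_S390_INT_IO(1, 0, 1, 0x0002);
	/* == 0x0002 | (1 << 16) | (0 << 18) | (1 << 26) == 0x04010002 */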
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 25160d26764b..a38f8f9ba657 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 21 /* N 32-bit words worth of info */
+#define NCAPINTS 22 /* N 32-bit words worth of info */
#define NBUGINTS 2 /* N 32-bit bug flags */
/*
@@ -81,10 +81,8 @@
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-
-/* CPU types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-/* FREE, was #define X86_FEATURE_K7 ( 3*32+ 5) "" Athlon */
+#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
@@ -97,7 +95,7 @@
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-/* FREE, was #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) "" LFENCE synchronizes RDTSC */
+#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
@@ -462,6 +460,14 @@
#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
/*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+ * CPUID levels like 0x80000022, etc.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */
+
+/*
* BUG word(s)
*/
#define X86_BUG(x) (NCAPINTS*32 + (x))
@@ -508,4 +514,5 @@
/* BUG word 2 */
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
+#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 1f23960d2b06..c492bdc97b05 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -123,6 +123,12 @@
# define DISABLE_FRED (1 << (X86_FEATURE_FRED & 31))
#endif
+#ifdef CONFIG_KVM_AMD_SEV
+#define DISABLE_SEV_SNP 0
+#else
+#define DISABLE_SEV_SNP (1 << (X86_FEATURE_SEV_SNP & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -147,8 +153,9 @@
DISABLE_ENQCMD)
#define DISABLED_MASK17 0
#define DISABLED_MASK18 (DISABLE_IBT)
-#define DISABLED_MASK19 0
+#define DISABLED_MASK19 (DISABLE_SEV_SNP)
#define DISABLED_MASK20 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define DISABLED_MASK21 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
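Bumping DISABLED_MASK_CHECK (and REQUIRED_MASK_CHECK below) is what keeps these tables honest: BUILD_BUG_ON_ZERO() becomes a compile error whenever its argument is non-zero, so raising NCAPINTS to 22 without adding DISABLED_MASK21 breaks the build instead of silently misindexing the feature words. A sketch of the mechanism, mirroring what the kernel's macro is believed to expand to:

	/*
	 * A negative-width bitfield is invalid C, so this only compiles
	 * when 'e' evaluates to zero.
	 */
	#define EXAMPLE_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))

	#define EXAMPLE_MASK_CHECK EXAMPLE_BUG_ON_ZERO(NCAPINTS != 22)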
diff --git a/tools/arch/x86/include/asm/irq_vectors.h b/tools/arch/x86/include/asm/irq_vectors.h
index 3f73ac3ed3a0..d18bfb238f66 100644
--- a/tools/arch/x86/include/asm/irq_vectors.h
+++ b/tools/arch/x86/include/asm/irq_vectors.h
@@ -84,11 +84,9 @@
#define HYPERVISOR_CALLBACK_VECTOR 0xf3
/* Vector for KVM to deliver posted interrupt IPI */
-#if IS_ENABLED(CONFIG_KVM)
#define POSTED_INTR_VECTOR 0xf2
#define POSTED_INTR_WAKEUP_VECTOR 0xf1
#define POSTED_INTR_NESTED_VECTOR 0xf0
-#endif
#define MANAGED_IRQ_SHUTDOWN_VECTOR 0xef
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 1f9dc9bd13eb..05956bd8bacf 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -176,6 +176,14 @@
* CPU is not vulnerable to Gather
* Data Sampling (GDS).
*/
+#define ARCH_CAP_RFDS_NO BIT(27) /*
+ * Not susceptible to Register
+ * File Data Sampling.
+ */
+#define ARCH_CAP_RFDS_CLEAR BIT(28) /*
+ * VERW clears CPU Register
+ * File.
+ */
#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
* IA32_XAPIC_DISABLE_STATUS MSR
@@ -605,34 +613,47 @@
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT 0
-#define MSR_AMD64_SEV_ES_ENABLED_BIT 1
-#define MSR_AMD64_SEV_SNP_ENABLED_BIT 2
#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
+#define MSR_AMD64_SEV_ES_ENABLED_BIT 1
#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
+#define MSR_AMD64_SEV_SNP_ENABLED_BIT 2
#define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
-
-/* SNP feature bits enabled by the hypervisor */
-#define MSR_AMD64_SNP_VTOM BIT_ULL(3)
-#define MSR_AMD64_SNP_REFLECT_VC BIT_ULL(4)
-#define MSR_AMD64_SNP_RESTRICTED_INJ BIT_ULL(5)
-#define MSR_AMD64_SNP_ALT_INJ BIT_ULL(6)
-#define MSR_AMD64_SNP_DEBUG_SWAP BIT_ULL(7)
-#define MSR_AMD64_SNP_PREVENT_HOST_IBS BIT_ULL(8)
-#define MSR_AMD64_SNP_BTB_ISOLATION BIT_ULL(9)
-#define MSR_AMD64_SNP_VMPL_SSS BIT_ULL(10)
-#define MSR_AMD64_SNP_SECURE_TSC BIT_ULL(11)
-#define MSR_AMD64_SNP_VMGEXIT_PARAM BIT_ULL(12)
-#define MSR_AMD64_SNP_IBS_VIRT BIT_ULL(14)
-#define MSR_AMD64_SNP_VMSA_REG_PROTECTION BIT_ULL(16)
-#define MSR_AMD64_SNP_SMT_PROTECTION BIT_ULL(17)
-
-/* SNP feature bits reserved for future use. */
-#define MSR_AMD64_SNP_RESERVED_BIT13 BIT_ULL(13)
-#define MSR_AMD64_SNP_RESERVED_BIT15 BIT_ULL(15)
-#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, 18)
+#define MSR_AMD64_SNP_VTOM_BIT 3
+#define MSR_AMD64_SNP_VTOM BIT_ULL(MSR_AMD64_SNP_VTOM_BIT)
+#define MSR_AMD64_SNP_REFLECT_VC_BIT 4
+#define MSR_AMD64_SNP_REFLECT_VC BIT_ULL(MSR_AMD64_SNP_REFLECT_VC_BIT)
+#define MSR_AMD64_SNP_RESTRICTED_INJ_BIT 5
+#define MSR_AMD64_SNP_RESTRICTED_INJ BIT_ULL(MSR_AMD64_SNP_RESTRICTED_INJ_BIT)
+#define MSR_AMD64_SNP_ALT_INJ_BIT 6
+#define MSR_AMD64_SNP_ALT_INJ BIT_ULL(MSR_AMD64_SNP_ALT_INJ_BIT)
+#define MSR_AMD64_SNP_DEBUG_SWAP_BIT 7
+#define MSR_AMD64_SNP_DEBUG_SWAP BIT_ULL(MSR_AMD64_SNP_DEBUG_SWAP_BIT)
+#define MSR_AMD64_SNP_PREVENT_HOST_IBS_BIT 8
+#define MSR_AMD64_SNP_PREVENT_HOST_IBS BIT_ULL(MSR_AMD64_SNP_PREVENT_HOST_IBS_BIT)
+#define MSR_AMD64_SNP_BTB_ISOLATION_BIT 9
+#define MSR_AMD64_SNP_BTB_ISOLATION BIT_ULL(MSR_AMD64_SNP_BTB_ISOLATION_BIT)
+#define MSR_AMD64_SNP_VMPL_SSS_BIT 10
+#define MSR_AMD64_SNP_VMPL_SSS BIT_ULL(MSR_AMD64_SNP_VMPL_SSS_BIT)
+#define MSR_AMD64_SNP_SECURE_TSC_BIT 11
+#define MSR_AMD64_SNP_SECURE_TSC BIT_ULL(MSR_AMD64_SNP_SECURE_TSC_BIT)
+#define MSR_AMD64_SNP_VMGEXIT_PARAM_BIT 12
+#define MSR_AMD64_SNP_VMGEXIT_PARAM BIT_ULL(MSR_AMD64_SNP_VMGEXIT_PARAM_BIT)
+#define MSR_AMD64_SNP_RESERVED_BIT13 BIT_ULL(13)
+#define MSR_AMD64_SNP_IBS_VIRT_BIT 14
+#define MSR_AMD64_SNP_IBS_VIRT BIT_ULL(MSR_AMD64_SNP_IBS_VIRT_BIT)
+#define MSR_AMD64_SNP_RESERVED_BIT15 BIT_ULL(15)
+#define MSR_AMD64_SNP_VMSA_REG_PROT_BIT 16
+#define MSR_AMD64_SNP_VMSA_REG_PROT BIT_ULL(MSR_AMD64_SNP_VMSA_REG_PROT_BIT)
+#define MSR_AMD64_SNP_SMT_PROT_BIT 17
+#define MSR_AMD64_SNP_SMT_PROT BIT_ULL(MSR_AMD64_SNP_SMT_PROT_BIT)
+#define MSR_AMD64_SNP_RESV_BIT 18
+#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, MSR_AMD64_SNP_RESV_BIT)
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+#define MSR_AMD64_RMP_BASE 0xc0010132
+#define MSR_AMD64_RMP_END 0xc0010133
+
/* AMD Collaborative Processor Performance Control MSRs */
#define MSR_AMD_CPPC_CAP1 0xc00102b0
#define MSR_AMD_CPPC_ENABLE 0xc00102b1
@@ -719,8 +740,15 @@
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
#define MSR_AMD64_SYSCFG 0xc0010010
-#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT 23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT 23
#define MSR_AMD64_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG_SNP_EN_BIT 24
+#define MSR_AMD64_SYSCFG_SNP_EN BIT_ULL(MSR_AMD64_SYSCFG_SNP_EN_BIT)
+#define MSR_AMD64_SYSCFG_SNP_VMPL_EN_BIT 25
+#define MSR_AMD64_SYSCFG_SNP_VMPL_EN BIT_ULL(MSR_AMD64_SYSCFG_SNP_VMPL_EN_BIT)
+#define MSR_AMD64_SYSCFG_MFDM_BIT 19
+#define MSR_AMD64_SYSCFG_MFDM BIT_ULL(MSR_AMD64_SYSCFG_MFDM_BIT)
+
#define MSR_K8_INT_PENDING_MSG 0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index 7ba1726b71c7..e9187ddd3d1f 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -99,6 +99,7 @@
#define REQUIRED_MASK18 0
#define REQUIRED_MASK19 0
#define REQUIRED_MASK20 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define REQUIRED_MASK21 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index a448d0964fc0..ef11aa4cab42 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -7,6 +7,8 @@
*
*/
+#include <linux/const.h>
+#include <linux/bits.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/stddef.h>
@@ -40,7 +42,6 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
-#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
@@ -49,7 +50,6 @@
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS
-#define __KVM_HAVE_READONLY_MEM
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -526,9 +526,301 @@ struct kvm_pmu_event_filter {
#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1
-#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS BIT(0)
+#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS _BITUL(0)
#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS)
+/* for KVM_CAP_MCE */
+struct kvm_x86_mce {
+ __u64 status;
+ __u64 addr;
+ __u64 misc;
+ __u64 mcg_status;
+ __u8 bank;
+ __u8 pad1[7];
+ __u64 pad2[3];
+};
+
+/* for KVM_CAP_XEN_HVM */
+#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
+#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
+#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
+#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
+#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8)
+
+struct kvm_xen_hvm_config {
+ __u32 flags;
+ __u32 msr;
+ __u64 blob_addr_32;
+ __u64 blob_addr_64;
+ __u8 blob_size_32;
+ __u8 blob_size_64;
+ __u8 pad2[30];
+};
+
+struct kvm_xen_hvm_attr {
+ __u16 type;
+ __u16 pad[3];
+ union {
+ __u8 long_mode;
+ __u8 vector;
+ __u8 runstate_update_flag;
+ union {
+ __u64 gfn;
+#define KVM_XEN_INVALID_GFN ((__u64)-1)
+ __u64 hva;
+ } shared_info;
+ struct {
+ __u32 send_port;
+ __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
+ __u32 flags;
+#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
+#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
+#define KVM_XEN_EVTCHN_RESET (1 << 2)
+ /*
+ * Events sent by the guest are either looped back to
+ * the guest itself (potentially on a different port#)
+ * or signalled via an eventfd.
+ */
+ union {
+ struct {
+ __u32 port;
+ __u32 vcpu;
+ __u32 priority;
+ } port;
+ struct {
+ __u32 port; /* Zero for eventfd */
+ __s32 fd;
+ } eventfd;
+ __u32 padding[4];
+ } deliver;
+ } evtchn;
+ __u32 xen_version;
+ __u64 pad[8];
+ } u;
+};
+
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
+#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
+#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
+#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
+#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */
+#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
+#define KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA 0x6
+
+struct kvm_xen_vcpu_attr {
+ __u16 type;
+ __u16 pad[3];
+ union {
+ __u64 gpa;
+#define KVM_XEN_INVALID_GPA ((__u64)-1)
+ __u64 hva;
+ __u64 pad[8];
+ struct {
+ __u64 state;
+ __u64 state_entry_time;
+ __u64 time_running;
+ __u64 time_runnable;
+ __u64 time_blocked;
+ __u64 time_offline;
+ } runstate;
+ __u32 vcpu_id;
+ struct {
+ __u32 port;
+ __u32 priority;
+ __u64 expires_ns;
+ } timer;
+ __u8 vector;
+ } u;
+};
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
+#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
+#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA 0x9
+
+/* Secure Encrypted Virtualization command */
+enum sev_cmd_id {
+ /* Guest initialization commands */
+ KVM_SEV_INIT = 0,
+ KVM_SEV_ES_INIT,
+ /* Guest launch commands */
+ KVM_SEV_LAUNCH_START,
+ KVM_SEV_LAUNCH_UPDATE_DATA,
+ KVM_SEV_LAUNCH_UPDATE_VMSA,
+ KVM_SEV_LAUNCH_SECRET,
+ KVM_SEV_LAUNCH_MEASURE,
+ KVM_SEV_LAUNCH_FINISH,
+ /* Guest migration commands (outgoing) */
+ KVM_SEV_SEND_START,
+ KVM_SEV_SEND_UPDATE_DATA,
+ KVM_SEV_SEND_UPDATE_VMSA,
+ KVM_SEV_SEND_FINISH,
+ /* Guest migration commands (incoming) */
+ KVM_SEV_RECEIVE_START,
+ KVM_SEV_RECEIVE_UPDATE_DATA,
+ KVM_SEV_RECEIVE_UPDATE_VMSA,
+ KVM_SEV_RECEIVE_FINISH,
+ /* Guest status and debug commands */
+ KVM_SEV_GUEST_STATUS,
+ KVM_SEV_DBG_DECRYPT,
+ KVM_SEV_DBG_ENCRYPT,
+ /* Guest certificates commands */
+ KVM_SEV_CERT_EXPORT,
+ /* Attestation report */
+ KVM_SEV_GET_ATTESTATION_REPORT,
+ /* Guest Migration Extension */
+ KVM_SEV_SEND_CANCEL,
+
+ KVM_SEV_NR_MAX,
+};
+
+struct kvm_sev_cmd {
+ __u32 id;
+ __u32 pad0;
+ __u64 data;
+ __u32 error;
+ __u32 sev_fd;
+};
+
+struct kvm_sev_launch_start {
+ __u32 handle;
+ __u32 policy;
+ __u64 dh_uaddr;
+ __u32 dh_len;
+ __u32 pad0;
+ __u64 session_uaddr;
+ __u32 session_len;
+ __u32 pad1;
+};
+
+struct kvm_sev_launch_update_data {
+ __u64 uaddr;
+ __u32 len;
+ __u32 pad0;
+};
+
+
+struct kvm_sev_launch_secret {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u32 pad0;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u32 pad1;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+ __u32 pad2;
+};
+
+struct kvm_sev_launch_measure {
+ __u64 uaddr;
+ __u32 len;
+ __u32 pad0;
+};
+
+struct kvm_sev_guest_status {
+ __u32 handle;
+ __u32 policy;
+ __u32 state;
+};
+
+struct kvm_sev_dbg {
+ __u64 src_uaddr;
+ __u64 dst_uaddr;
+ __u32 len;
+ __u32 pad0;
+};
+
+struct kvm_sev_attestation_report {
+ __u8 mnonce[16];
+ __u64 uaddr;
+ __u32 len;
+ __u32 pad0;
+};
+
+struct kvm_sev_send_start {
+ __u32 policy;
+ __u32 pad0;
+ __u64 pdh_cert_uaddr;
+ __u32 pdh_cert_len;
+ __u32 pad1;
+ __u64 plat_certs_uaddr;
+ __u32 plat_certs_len;
+ __u32 pad2;
+ __u64 amd_certs_uaddr;
+ __u32 amd_certs_len;
+ __u32 pad3;
+ __u64 session_uaddr;
+ __u32 session_len;
+ __u32 pad4;
+};
+
+struct kvm_sev_send_update_data {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u32 pad0;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u32 pad1;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+ __u32 pad2;
+};
+
+struct kvm_sev_receive_start {
+ __u32 handle;
+ __u32 policy;
+ __u64 pdh_uaddr;
+ __u32 pdh_len;
+ __u32 pad0;
+ __u64 session_uaddr;
+ __u32 session_len;
+ __u32 pad1;
+};
+
+struct kvm_sev_receive_update_data {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u32 pad0;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u32 pad1;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+ __u32 pad2;
+};
+
+#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
+#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
+
+struct kvm_hyperv_eventfd {
+ __u32 conn_id;
+ __s32 fd;
+ __u32 flags;
+ __u32 padding[3];
+};
+
+#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
+#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
+
/*
* Masked event layout.
* Bits Description
@@ -549,10 +841,10 @@ struct kvm_pmu_event_filter {
((__u64)(!!(exclude)) << 55))
#define KVM_PMU_MASKED_ENTRY_EVENT_SELECT \
- (GENMASK_ULL(7, 0) | GENMASK_ULL(35, 32))
-#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (GENMASK_ULL(63, 56))
-#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (GENMASK_ULL(15, 8))
-#define KVM_PMU_MASKED_ENTRY_EXCLUDE (BIT_ULL(55))
+ (__GENMASK_ULL(7, 0) | __GENMASK_ULL(35, 32))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (__GENMASK_ULL(63, 56))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (__GENMASK_ULL(15, 8))
+#define KVM_PMU_MASKED_ENTRY_EXCLUDE (_BITULL(55))
#define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT (56)
/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
@@ -560,7 +852,7 @@ struct kvm_pmu_event_filter {
#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
/* x86-specific KVM_EXIT_HYPERCALL flags. */
-#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0)
+#define KVM_EXIT_HYPERCALL_LONG_MODE _BITULL(0)
#define KVM_X86_DEFAULT_VM 0
#define KVM_X86_SW_PROTECTED_VM 1
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 4fa4ade1ce74..540c0f2c4fda 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -121,7 +121,7 @@ static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
int i, n;
/* recognize hard coded LLVM section name */
- if (strcmp(sec_name, ".arena.1") == 0) {
+ if (strcmp(sec_name, ".addr_space.1") == 0) {
/* this is the name to use in skeleton */
snprintf(buf, buf_sz, "arena");
return true;
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 318e2dad27e0..ae57bf69ad4a 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -76,6 +76,12 @@ enum {
DNS
};
+enum {
+ IPV4 = 1,
+ IPV6,
+ IP_TYPE_MAX
+};
+
static int in_hand_shake;
static char *os_name = "";
@@ -102,6 +108,11 @@ static struct utsname uts_buf;
#define MAX_FILE_NAME 100
#define ENTRIES_PER_BLOCK 50
+/*
+ * Change this entry if the number of addresses increases in the future
+ */
+#define MAX_IP_ENTRIES 64
+#define OUTSTR_BUF_SIZE ((INET6_ADDRSTRLEN + 1) * MAX_IP_ENTRIES)
struct kvp_record {
char key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
@@ -1171,6 +1182,18 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
return 0;
}
+int ip_version_check(const char *input_addr)
+{
+ struct in6_addr addr;
+
+ if (inet_pton(AF_INET, input_addr, &addr))
+ return IPV4;
+ else if (inet_pton(AF_INET6, input_addr, &addr))
+ return IPV6;
+
+ return -EINVAL;
+}
+
/*
* Only IPv4 subnet strings need to be converted to plen
* For IPv6 the subnet is already provided in plen format
@@ -1197,14 +1220,75 @@ static int kvp_subnet_to_plen(char *subnet_addr_str)
return plen;
}
+static int process_dns_gateway_nm(FILE *f, char *ip_string, int type,
+ int ip_sec)
+{
+ char addr[INET6_ADDRSTRLEN], *output_str;
+ int ip_offset = 0, error = 0, ip_ver;
+ char *param_name;
+
+ if (type == DNS)
+ param_name = "dns";
+ else if (type == GATEWAY)
+ param_name = "gateway";
+ else
+ return -EINVAL;
+
+ output_str = (char *)calloc(OUTSTR_BUF_SIZE, sizeof(char));
+ if (!output_str)
+ return -ENOMEM;
+
+ while (1) {
+ memset(addr, 0, sizeof(addr));
+
+ if (!parse_ip_val_buffer(ip_string, &ip_offset, addr,
+ (MAX_IP_ADDR_SIZE * 2)))
+ break;
+
+ ip_ver = ip_version_check(addr);
+ if (ip_ver < 0)
+ continue;
+
+ if ((ip_ver == IPV4 && ip_sec == IPV4) ||
+ (ip_ver == IPV6 && ip_sec == IPV6)) {
+ /*
+ * do a bounds check to avoid out-of-bounds writes
+ */
+ if ((OUTSTR_BUF_SIZE - strlen(output_str)) >
+ (strlen(addr) + 1)) {
+ strncat(output_str, addr,
+ OUTSTR_BUF_SIZE -
+ strlen(output_str) - 1);
+ strncat(output_str, ",",
+ OUTSTR_BUF_SIZE -
+ strlen(output_str) - 1);
+ }
+ } else {
+ continue;
+ }
+ }
+
+ if (strlen(output_str)) {
+ /*
+ * This is to get rid of the extra comma character
+ * at the end of the string
+ */
+ output_str[strlen(output_str) - 1] = '\0';
+ error = fprintf(f, "%s=%s\n", param_name, output_str);
+ }
+
+ free(output_str);
+ return error;
+}
+
static int process_ip_string_nm(FILE *f, char *ip_string, char *subnet,
- int is_ipv6)
+ int ip_sec)
{
char addr[INET6_ADDRSTRLEN];
char subnet_addr[INET6_ADDRSTRLEN];
- int error, i = 0;
+ int error = 0, i = 0;
int ip_offset = 0, subnet_offset = 0;
- int plen;
+ int plen, ip_ver;
memset(addr, 0, sizeof(addr));
memset(subnet_addr, 0, sizeof(subnet_addr));
@@ -1216,10 +1300,16 @@ static int process_ip_string_nm(FILE *f, char *ip_string, char *subnet,
subnet_addr,
(MAX_IP_ADDR_SIZE *
2))) {
- if (!is_ipv6)
+ ip_ver = ip_version_check(addr);
+ if (ip_ver < 0)
+ continue;
+
+ if (ip_ver == IPV4 && ip_sec == IPV4)
plen = kvp_subnet_to_plen((char *)subnet_addr);
- else
+ else if (ip_ver == IPV6 && ip_sec == IPV6)
plen = atoi(subnet_addr);
+ else
+ continue;
if (plen < 0)
return plen;
@@ -1233,17 +1323,16 @@ static int process_ip_string_nm(FILE *f, char *ip_string, char *subnet,
memset(subnet_addr, 0, sizeof(subnet_addr));
}
- return 0;
+ return error;
}
static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
{
- int error = 0;
+ int error = 0, ip_ver;
char if_filename[PATH_MAX];
char nm_filename[PATH_MAX];
FILE *ifcfg_file, *nmfile;
char cmd[PATH_MAX];
- int is_ipv6 = 0;
char *mac_addr;
int str_len;
@@ -1421,52 +1510,94 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
if (error)
goto setval_error;
- if (new_val->addr_family & ADDR_FAMILY_IPV6) {
- error = fprintf(nmfile, "\n[ipv6]\n");
- if (error < 0)
- goto setval_error;
- is_ipv6 = 1;
- } else {
- error = fprintf(nmfile, "\n[ipv4]\n");
- if (error < 0)
- goto setval_error;
- }
-
/*
* Now we populate the keyfile format
+ *
+ * The keyfile format expects the IPv6 and IPv4 configuration in
+ * different sections. Therefore we iterate through the list twice,
+ * once to populate the IPv4 section and the next time for IPv6
*/
+ ip_ver = IPV4;
+ do {
+ if (ip_ver == IPV4) {
+ error = fprintf(nmfile, "\n[ipv4]\n");
+ if (error < 0)
+ goto setval_error;
+ } else {
+ error = fprintf(nmfile, "\n[ipv6]\n");
+ if (error < 0)
+ goto setval_error;
+ }
- if (new_val->dhcp_enabled) {
- error = kvp_write_file(nmfile, "method", "", "auto");
- if (error < 0)
- goto setval_error;
- } else {
- error = kvp_write_file(nmfile, "method", "", "manual");
+ /*
+ * Write the configuration for ipaddress, netmask, gateway and
+ * name services
+ */
+ error = process_ip_string_nm(nmfile, (char *)new_val->ip_addr,
+ (char *)new_val->sub_net,
+ ip_ver);
if (error < 0)
goto setval_error;
- }
- /*
- * Write the configuration for ipaddress, netmask, gateway and
- * name services
- */
- error = process_ip_string_nm(nmfile, (char *)new_val->ip_addr,
- (char *)new_val->sub_net, is_ipv6);
- if (error < 0)
- goto setval_error;
+ /*
+ * As dhcp_enabled is only valid for ipv4, we do not set dhcp
+ * methods for ipv6 based on the dhcp_enabled flag.
+ *
+ * For ipv4, set method to manual only when dhcp_enabled is
+ * false and specific ipv4 addresses are configured. If
+ * dhcp_enabled is false and no ipv4 addresses are configured
+ * either, set method to 'disabled'.
+ *
+ * For ipv6, set method to manual when we configure ipv6
+ * addresses. Otherwise set method to 'auto' so that SLAAC from
+ * RA may be used.
+ */
+ if (ip_ver == IPV4) {
+ if (new_val->dhcp_enabled) {
+ error = kvp_write_file(nmfile, "method", "",
+ "auto");
+ if (error < 0)
+ goto setval_error;
+ } else if (error) {
+ error = kvp_write_file(nmfile, "method", "",
+ "manual");
+ if (error < 0)
+ goto setval_error;
+ } else {
+ error = kvp_write_file(nmfile, "method", "",
+ "disabled");
+ if (error < 0)
+ goto setval_error;
+ }
+ } else if (ip_ver == IPV6) {
+ if (error) {
+ error = kvp_write_file(nmfile, "method", "",
+ "manual");
+ if (error < 0)
+ goto setval_error;
+ } else {
+ error = kvp_write_file(nmfile, "method", "",
+ "auto");
+ if (error < 0)
+ goto setval_error;
+ }
+ }
- /* we do not want ipv4 addresses in ipv6 section and vice versa */
- if (is_ipv6 != is_ipv4((char *)new_val->gate_way)) {
- error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way);
+ error = process_dns_gateway_nm(nmfile,
+ (char *)new_val->gate_way,
+ GATEWAY, ip_ver);
if (error < 0)
goto setval_error;
- }
- if (is_ipv6 != is_ipv4((char *)new_val->dns_addr)) {
- error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr);
+ error = process_dns_gateway_nm(nmfile,
+ (char *)new_val->dns_addr, DNS,
+ ip_ver);
if (error < 0)
goto setval_error;
- }
+
+ ip_ver++;
+ } while (ip_ver < IP_TYPE_MAX);
+
fclose(nmfile);
fclose(ifcfg_file);
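With the two-pass loop above, a guest that is handed both address families ends up with a single keyfile holding two sections, roughly like the following (addresses invented; the exact address keys come from the elided parts of process_ip_string_nm()):

	[ipv4]
	address1=192.168.10.5/24
	method=manual
	gateway=192.168.10.1
	dns=192.168.10.53

	[ipv6]
	address1=2001:db8::5/64
	method=manual
	gateway=2001:db8::1
	dns=2001:db8::53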
diff --git a/tools/include/asm-generic/bitops/__fls.h b/tools/include/asm-generic/bitops/__fls.h
index 03f721a8a2b1..54ccccf96e21 100644
--- a/tools/include/asm-generic/bitops/__fls.h
+++ b/tools/include/asm-generic/bitops/__fls.h
@@ -5,12 +5,12 @@
#include <asm/types.h>
/**
- * __fls - find last (most-significant) set bit in a long word
+ * generic___fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long generic___fls(unsigned long word)
{
int num = BITS_PER_LONG - 1;
@@ -41,4 +41,8 @@ static __always_inline unsigned long __fls(unsigned long word)
return num;
}
+#ifndef __HAVE_ARCH___FLS
+#define __fls(word) generic___fls(word)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
diff --git a/tools/include/asm-generic/bitops/fls.h b/tools/include/asm-generic/bitops/fls.h
index b168bb10e1be..26f3ce1dd6e4 100644
--- a/tools/include/asm-generic/bitops/fls.h
+++ b/tools/include/asm-generic/bitops/fls.h
@@ -3,14 +3,14 @@
#define _ASM_GENERIC_BITOPS_FLS_H_
/**
- * fls - find last (most-significant) bit set
+ * generic_fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int fls(unsigned int x)
+static __always_inline int generic_fls(unsigned int x)
{
int r = 32;
@@ -39,4 +39,8 @@ static __always_inline int fls(unsigned int x)
return r;
}
+#ifndef __HAVE_ARCH_FLS
+#define fls(x) generic_fls(x)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
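The rename plus the #ifndef fallback gives architectures an override hook: define __HAVE_ARCH_FLS (or __HAVE_ARCH___FLS) and supply a native implementation, and the generic one steps aside. A sketch of the expected pattern (the builtin-based body is illustrative, not any particular arch's code):

	/* in an arch header, before the generic header is pulled in */
	#define __HAVE_ARCH_FLS
	static __always_inline int fls(unsigned int x)
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	/* without the define, the generic fallback applies:
	 *   fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32 */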
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
index 72535f00572f..72ea363d434d 100644
--- a/tools/include/linux/btf_ids.h
+++ b/tools/include/linux/btf_ids.h
@@ -3,6 +3,8 @@
#ifndef _LINUX_BTF_IDS_H
#define _LINUX_BTF_IDS_H
+#include <linux/types.h> /* for u32 */
+
struct btf_id_set {
u32 cnt;
u32 ids[];
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 4b0673bf52c2..07cfad817d53 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -8,6 +8,7 @@
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/math.h>
+#include <linux/panic.h>
#include <endian.h>
#include <byteswap.h>
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index f3c82ab5b14c..7d73da098047 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -37,4 +37,9 @@ static inline void totalram_pages_add(long count)
{
}
+static inline int early_pfn_to_nid(unsigned long pfn)
+{
+ return 0;
+}
+
#endif
diff --git a/tools/include/linux/panic.h b/tools/include/linux/panic.h
new file mode 100644
index 000000000000..9c8f17a41ce8
--- /dev/null
+++ b/tools/include/linux/panic.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_PANIC_H
+#define _TOOLS_LINUX_PANIC_H
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static inline void panic(const char *fmt, ...)
+{
+ va_list argp;
+
+ va_start(argp, fmt);
+ vfprintf(stderr, fmt, argp);
+ va_end(argp);
+ exit(-1);
+}
+
+#endif
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index fd4f9574d177..2ee338860b7e 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -3013,6 +3013,7 @@ struct drm_i915_query_item {
* - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
* - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
* - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
+ * - %DRM_I915_QUERY_GUC_SUBMISSION_VERSION (see struct drm_i915_query_guc_submission_version)
*/
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
@@ -3021,6 +3022,7 @@ struct drm_i915_query_item {
#define DRM_I915_QUERY_MEMORY_REGIONS 4
#define DRM_I915_QUERY_HWCONFIG_BLOB 5
#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
+#define DRM_I915_QUERY_GUC_SUBMISSION_VERSION 7
/* Must be kept compact -- no holes and well documented */
/**
@@ -3567,6 +3569,20 @@ struct drm_i915_query_memory_regions {
};
/**
+ * struct drm_i915_query_guc_submission_version - query GuC submission interface version
+ */
+struct drm_i915_query_guc_submission_version {
+ /** @branch: Firmware branch version. */
+ __u32 branch;
+ /** @major: Firmware major version. */
+ __u32 major;
+ /** @minor: Firmware minor version. */
+ __u32 minor;
+ /** @patch: Firmware patch version. */
+ __u32 patch;
+};
+
+/**
* DOC: GuC HWCONFIG blob uAPI
*
* The GuC produces a blob with information about the current device.
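Userspace reaches the new query item through the usual two-call drm_i915_query pattern: probe so the kernel sizes item.length, then point data_ptr at a buffer and call again. A minimal hedged sketch ('fd' assumed to be an open i915 DRM node; error handling reduced to a bare minimum):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int query_guc_version(int fd)
	{
		struct drm_i915_query_guc_submission_version ver = {0};
		struct drm_i915_query_item item = {
			.query_id = DRM_I915_QUERY_GUC_SUBMISSION_VERSION,
		};
		struct drm_i915_query query = {
			.num_items = 1,
			.items_ptr = (uintptr_t)&item,
		};

		if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query))	/* sizes item.length */
			return -1;
		item.data_ptr = (uintptr_t)&ver;
		if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query))	/* fills the struct */
			return -1;
		return (int)ver.major;
	}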
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index 48ad69f7722e..45e4e64fd664 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -64,6 +64,24 @@ struct fstrim_range {
__u64 minlen;
};
+/*
+ * We include a length field because some filesystems (vfat) have an identifier
+ * that we do want to expose as a UUID, but that doesn't have the standard
+ * length.
+ *
+ * We use a fixed size buffer because this interface will, by fiat, never
+ * support "UUIDs" longer than 16 bytes; we don't want to force all downstream
+ * users to have to deal with that.
+ */
+struct fsuuid2 {
+ __u8 len;
+ __u8 uuid[16];
+};
+
+struct fs_sysfs_path {
+ __u8 len;
+ __u8 name[128];
+};
+
/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */
#define FILE_DEDUPE_RANGE_SAME 0
#define FILE_DEDUPE_RANGE_DIFFERS 1
@@ -215,6 +233,13 @@ struct fsxattr {
#define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr)
#define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX])
#define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX])
+/* Returns the external filesystem UUID, the same one blkid returns */
+#define FS_IOC_GETFSUUID _IOR(0x15, 0, struct fsuuid2)
+/*
+ * Returns the path component under /sys/fs/ that refers to this filesystem;
+ * also /sys/kernel/debug/ for filesystems with debugfs exports
+ */
+#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path)
/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
@@ -301,9 +326,12 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO O_APPEND */
#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010)
+/* per-IO negation of O_APPEND */
+#define RWF_NOAPPEND ((__force __kernel_rwf_t)0x00000020)
+
/* mask of flags supported by the kernel */
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
- RWF_APPEND)
+ RWF_APPEND | RWF_NOAPPEND)
/* Pagemap ioctl */
#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg)
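
A short sketch of the new UUID ioctl; fd may be any open file on the filesystem, and u.len bounds the valid bytes (at most 16 by construction):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    static void print_fs_uuid(int fd)
    {
            struct fsuuid2 u = { 0 };
            int i;

            if (ioctl(fd, FS_IOC_GETFSUUID, &u) != 0)
                    return;
            for (i = 0; i < u.len; i++)
                    printf("%02x", u.uuid[i]);
            printf("\n");
    }
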
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index c3308536482b..2190adbe3002 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -16,6 +16,11 @@
#define KVM_API_VERSION 12
+/*
+ * Backwards-compatible definitions.
+ */
+#define __KVM_HAVE_GUEST_DEBUG
+
/* for KVM_SET_USER_MEMORY_REGION */
struct kvm_userspace_memory_region {
__u32 slot;
@@ -85,43 +90,6 @@ struct kvm_pit_config {
#define KVM_PIT_SPEAKER_DUMMY 1
-struct kvm_s390_skeys {
- __u64 start_gfn;
- __u64 count;
- __u64 skeydata_addr;
- __u32 flags;
- __u32 reserved[9];
-};
-
-#define KVM_S390_CMMA_PEEK (1 << 0)
-
-/**
- * kvm_s390_cmma_log - Used for CMMA migration.
- *
- * Used both for input and output.
- *
- * @start_gfn: Guest page number to start from.
- * @count: Size of the result buffer.
- * @flags: Control operation mode via KVM_S390_CMMA_* flags
- * @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty
- * pages are still remaining.
- * @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set
- * in the PGSTE.
- * @values: Pointer to the values buffer.
- *
- * Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls.
- */
-struct kvm_s390_cmma_log {
- __u64 start_gfn;
- __u32 count;
- __u32 flags;
- union {
- __u64 remaining;
- __u64 mask;
- };
- __u64 values;
-};
-
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
@@ -315,11 +283,6 @@ struct kvm_run {
__u32 ipb;
} s390_sieic;
/* KVM_EXIT_S390_RESET */
-#define KVM_S390_RESET_POR 1
-#define KVM_S390_RESET_CLEAR 2
-#define KVM_S390_RESET_SUBSYSTEM 4
-#define KVM_S390_RESET_CPU_INIT 8
-#define KVM_S390_RESET_IPL 16
__u64 s390_reset_flags;
/* KVM_EXIT_S390_UCONTROL */
struct {
@@ -536,43 +499,6 @@ struct kvm_translation {
__u8 pad[5];
};
-/* for KVM_S390_MEM_OP */
-struct kvm_s390_mem_op {
- /* in */
- __u64 gaddr; /* the guest address */
- __u64 flags; /* flags */
- __u32 size; /* amount of bytes */
- __u32 op; /* type of operation */
- __u64 buf; /* buffer in userspace */
- union {
- struct {
- __u8 ar; /* the access register number */
- __u8 key; /* access key, ignored if flag unset */
- __u8 pad1[6]; /* ignored */
- __u64 old_addr; /* ignored if cmpxchg flag unset */
- };
- __u32 sida_offset; /* offset into the sida */
- __u8 reserved[32]; /* ignored */
- };
-};
-/* types for kvm_s390_mem_op->op */
-#define KVM_S390_MEMOP_LOGICAL_READ 0
-#define KVM_S390_MEMOP_LOGICAL_WRITE 1
-#define KVM_S390_MEMOP_SIDA_READ 2
-#define KVM_S390_MEMOP_SIDA_WRITE 3
-#define KVM_S390_MEMOP_ABSOLUTE_READ 4
-#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
-#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6
-
-/* flags for kvm_s390_mem_op->flags */
-#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
-#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
-#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
-
-/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
-#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0)
-#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1)
-
/* for KVM_INTERRUPT */
struct kvm_interrupt {
/* in */
@@ -637,124 +563,6 @@ struct kvm_mp_state {
__u32 mp_state;
};
-struct kvm_s390_psw {
- __u64 mask;
- __u64 addr;
-};
-
-/* valid values for type in kvm_s390_interrupt */
-#define KVM_S390_SIGP_STOP 0xfffe0000u
-#define KVM_S390_PROGRAM_INT 0xfffe0001u
-#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
-#define KVM_S390_RESTART 0xfffe0003u
-#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u
-#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u
-#define KVM_S390_MCHK 0xfffe1000u
-#define KVM_S390_INT_CLOCK_COMP 0xffff1004u
-#define KVM_S390_INT_CPU_TIMER 0xffff1005u
-#define KVM_S390_INT_VIRTIO 0xffff2603u
-#define KVM_S390_INT_SERVICE 0xffff2401u
-#define KVM_S390_INT_EMERGENCY 0xffff1201u
-#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
-/* Anything below 0xfffe0000u is taken by INT_IO */
-#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \
- (((schid)) | \
- ((ssid) << 16) | \
- ((cssid) << 18) | \
- ((ai) << 26))
-#define KVM_S390_INT_IO_MIN 0x00000000u
-#define KVM_S390_INT_IO_MAX 0xfffdffffu
-#define KVM_S390_INT_IO_AI_MASK 0x04000000u
-
-
-struct kvm_s390_interrupt {
- __u32 type;
- __u32 parm;
- __u64 parm64;
-};
-
-struct kvm_s390_io_info {
- __u16 subchannel_id;
- __u16 subchannel_nr;
- __u32 io_int_parm;
- __u32 io_int_word;
-};
-
-struct kvm_s390_ext_info {
- __u32 ext_params;
- __u32 pad;
- __u64 ext_params2;
-};
-
-struct kvm_s390_pgm_info {
- __u64 trans_exc_code;
- __u64 mon_code;
- __u64 per_address;
- __u32 data_exc_code;
- __u16 code;
- __u16 mon_class_nr;
- __u8 per_code;
- __u8 per_atmid;
- __u8 exc_access_id;
- __u8 per_access_id;
- __u8 op_access_id;
-#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01
-#define KVM_S390_PGM_FLAGS_ILC_0 0x02
-#define KVM_S390_PGM_FLAGS_ILC_1 0x04
-#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06
-#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08
- __u8 flags;
- __u8 pad[2];
-};
-
-struct kvm_s390_prefix_info {
- __u32 address;
-};
-
-struct kvm_s390_extcall_info {
- __u16 code;
-};
-
-struct kvm_s390_emerg_info {
- __u16 code;
-};
-
-#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
-struct kvm_s390_stop_info {
- __u32 flags;
-};
-
-struct kvm_s390_mchk_info {
- __u64 cr14;
- __u64 mcic;
- __u64 failing_storage_address;
- __u32 ext_damage_code;
- __u32 pad;
- __u8 fixed_logout[16];
-};
-
-struct kvm_s390_irq {
- __u64 type;
- union {
- struct kvm_s390_io_info io;
- struct kvm_s390_ext_info ext;
- struct kvm_s390_pgm_info pgm;
- struct kvm_s390_emerg_info emerg;
- struct kvm_s390_extcall_info extcall;
- struct kvm_s390_prefix_info prefix;
- struct kvm_s390_stop_info stop;
- struct kvm_s390_mchk_info mchk;
- char reserved[64];
- } u;
-};
-
-struct kvm_s390_irq_state {
- __u64 buf;
- __u32 flags; /* will stay unused for compatibility reasons */
- __u32 len;
- __u32 reserved[4]; /* will stay unused for compatibility reasons */
-};
-
/* for KVM_SET_GUEST_DEBUG */
#define KVM_GUESTDBG_ENABLE 0x00000001
@@ -810,50 +618,6 @@ struct kvm_enable_cap {
__u8 pad[64];
};
-/* for KVM_PPC_GET_PVINFO */
-
-#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
-
-struct kvm_ppc_pvinfo {
- /* out */
- __u32 flags;
- __u32 hcall[4];
- __u8 pad[108];
-};
-
-/* for KVM_PPC_GET_SMMU_INFO */
-#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
-
-struct kvm_ppc_one_page_size {
- __u32 page_shift; /* Page shift (or 0) */
- __u32 pte_enc; /* Encoding in the HPTE (>>12) */
-};
-
-struct kvm_ppc_one_seg_page_size {
- __u32 page_shift; /* Base page shift of segment (or 0) */
- __u32 slb_enc; /* SLB encoding for BookS */
- struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
-};
-
-#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
-#define KVM_PPC_1T_SEGMENTS 0x00000002
-#define KVM_PPC_NO_HASH 0x00000004
-
-struct kvm_ppc_smmu_info {
- __u64 flags;
- __u32 slb_size;
- __u16 data_keys; /* # storage keys supported for data */
- __u16 instr_keys; /* # storage keys supported for instructions */
- struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
-};
-
-/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */
-struct kvm_ppc_resize_hpt {
- __u64 flags;
- __u32 shift;
- __u32 pad;
-};
-
#define KVMIO 0xAE
/* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -923,9 +687,7 @@ struct kvm_ppc_resize_hpt {
/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
#define KVM_CAP_USER_NMI 22
-#ifdef __KVM_HAVE_GUEST_DEBUG
#define KVM_CAP_SET_GUEST_DEBUG 23
-#endif
#ifdef __KVM_HAVE_PIT
#define KVM_CAP_REINJECT_CONTROL 24
#endif
@@ -1156,8 +918,6 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_GUEST_MEMFD 234
#define KVM_CAP_VM_TYPES 235
-#ifdef KVM_CAP_IRQ_ROUTING
-
struct kvm_irq_routing_irqchip {
__u32 irqchip;
__u32 pin;
@@ -1222,42 +982,6 @@ struct kvm_irq_routing {
struct kvm_irq_routing_entry entries[];
};
-#endif
-
-#ifdef KVM_CAP_MCE
-/* x86 MCE */
-struct kvm_x86_mce {
- __u64 status;
- __u64 addr;
- __u64 misc;
- __u64 mcg_status;
- __u8 bank;
- __u8 pad1[7];
- __u64 pad2[3];
-};
-#endif
-
-#ifdef KVM_CAP_XEN_HVM
-#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
-#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
-#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
-#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
-#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
-#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
-#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
-#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
-
-struct kvm_xen_hvm_config {
- __u32 flags;
- __u32 msr;
- __u64 blob_addr_32;
- __u64 blob_addr_64;
- __u8 blob_size_32;
- __u8 blob_size_64;
- __u8 pad2[30];
-};
-#endif
-
#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
/*
* Available with KVM_CAP_IRQFD_RESAMPLE
@@ -1442,11 +1166,6 @@ struct kvm_vfio_spapr_tce {
struct kvm_userspace_memory_region2)
/* enable ucontrol for s390 */
-struct kvm_s390_ucas_mapping {
- __u64 user_addr;
- __u64 vcpu_addr;
- __u64 length;
-};
#define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping)
#define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping)
#define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long)
@@ -1641,89 +1360,6 @@ struct kvm_enc_region {
#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
-struct kvm_s390_pv_sec_parm {
- __u64 origin;
- __u64 length;
-};
-
-struct kvm_s390_pv_unp {
- __u64 addr;
- __u64 size;
- __u64 tweak;
-};
-
-enum pv_cmd_dmp_id {
- KVM_PV_DUMP_INIT,
- KVM_PV_DUMP_CONFIG_STOR_STATE,
- KVM_PV_DUMP_COMPLETE,
- KVM_PV_DUMP_CPU,
-};
-
-struct kvm_s390_pv_dmp {
- __u64 subcmd;
- __u64 buff_addr;
- __u64 buff_len;
- __u64 gaddr; /* For dump storage state */
- __u64 reserved[4];
-};
-
-enum pv_cmd_info_id {
- KVM_PV_INFO_VM,
- KVM_PV_INFO_DUMP,
-};
-
-struct kvm_s390_pv_info_dump {
- __u64 dump_cpu_buffer_len;
- __u64 dump_config_mem_buffer_per_1m;
- __u64 dump_config_finalize_len;
-};
-
-struct kvm_s390_pv_info_vm {
- __u64 inst_calls_list[4];
- __u64 max_cpus;
- __u64 max_guests;
- __u64 max_guest_addr;
- __u64 feature_indication;
-};
-
-struct kvm_s390_pv_info_header {
- __u32 id;
- __u32 len_max;
- __u32 len_written;
- __u32 reserved;
-};
-
-struct kvm_s390_pv_info {
- struct kvm_s390_pv_info_header header;
- union {
- struct kvm_s390_pv_info_dump dump;
- struct kvm_s390_pv_info_vm vm;
- };
-};
-
-enum pv_cmd_id {
- KVM_PV_ENABLE,
- KVM_PV_DISABLE,
- KVM_PV_SET_SEC_PARMS,
- KVM_PV_UNPACK,
- KVM_PV_VERIFY,
- KVM_PV_PREP_RESET,
- KVM_PV_UNSHARE_ALL,
- KVM_PV_INFO,
- KVM_PV_DUMP,
- KVM_PV_ASYNC_CLEANUP_PREPARE,
- KVM_PV_ASYNC_CLEANUP_PERFORM,
-};
-
-struct kvm_pv_cmd {
- __u32 cmd; /* Command to be executed */
- __u16 rc; /* Ultravisor return code */
- __u16 rrc; /* Ultravisor return reason code */
- __u64 data; /* Data or address */
- __u32 flags; /* flags for future extensions. Must be 0 for now */
- __u32 reserved[3];
-};
-
/* Available with KVM_CAP_S390_PROTECTED */
#define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
@@ -1737,58 +1373,6 @@ struct kvm_pv_cmd {
#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr)
#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr)
-struct kvm_xen_hvm_attr {
- __u16 type;
- __u16 pad[3];
- union {
- __u8 long_mode;
- __u8 vector;
- __u8 runstate_update_flag;
- struct {
- __u64 gfn;
-#define KVM_XEN_INVALID_GFN ((__u64)-1)
- } shared_info;
- struct {
- __u32 send_port;
- __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
- __u32 flags;
-#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
-#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
-#define KVM_XEN_EVTCHN_RESET (1 << 2)
- /*
- * Events sent by the guest are either looped back to
- * the guest itself (potentially on a different port#)
- * or signalled via an eventfd.
- */
- union {
- struct {
- __u32 port;
- __u32 vcpu;
- __u32 priority;
- } port;
- struct {
- __u32 port; /* Zero for eventfd */
- __s32 fd;
- } eventfd;
- __u32 padding[4];
- } deliver;
- } evtchn;
- __u32 xen_version;
- __u64 pad[8];
- } u;
-};
-
-
-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
-#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
-#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
-#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
-#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
-#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */
-#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5
-
/* Per-vCPU Xen attributes */
#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
@@ -1799,242 +1383,6 @@ struct kvm_xen_hvm_attr {
#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
-struct kvm_xen_vcpu_attr {
- __u16 type;
- __u16 pad[3];
- union {
- __u64 gpa;
-#define KVM_XEN_INVALID_GPA ((__u64)-1)
- __u64 pad[8];
- struct {
- __u64 state;
- __u64 state_entry_time;
- __u64 time_running;
- __u64 time_runnable;
- __u64 time_blocked;
- __u64 time_offline;
- } runstate;
- __u32 vcpu_id;
- struct {
- __u32 port;
- __u32 priority;
- __u64 expires_ns;
- } timer;
- __u8 vector;
- } u;
-};
-
-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
-#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
-#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
-#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
-#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
-#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
-#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
-#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
-#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
-#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
-
-/* Secure Encrypted Virtualization command */
-enum sev_cmd_id {
- /* Guest initialization commands */
- KVM_SEV_INIT = 0,
- KVM_SEV_ES_INIT,
- /* Guest launch commands */
- KVM_SEV_LAUNCH_START,
- KVM_SEV_LAUNCH_UPDATE_DATA,
- KVM_SEV_LAUNCH_UPDATE_VMSA,
- KVM_SEV_LAUNCH_SECRET,
- KVM_SEV_LAUNCH_MEASURE,
- KVM_SEV_LAUNCH_FINISH,
- /* Guest migration commands (outgoing) */
- KVM_SEV_SEND_START,
- KVM_SEV_SEND_UPDATE_DATA,
- KVM_SEV_SEND_UPDATE_VMSA,
- KVM_SEV_SEND_FINISH,
- /* Guest migration commands (incoming) */
- KVM_SEV_RECEIVE_START,
- KVM_SEV_RECEIVE_UPDATE_DATA,
- KVM_SEV_RECEIVE_UPDATE_VMSA,
- KVM_SEV_RECEIVE_FINISH,
- /* Guest status and debug commands */
- KVM_SEV_GUEST_STATUS,
- KVM_SEV_DBG_DECRYPT,
- KVM_SEV_DBG_ENCRYPT,
- /* Guest certificates commands */
- KVM_SEV_CERT_EXPORT,
- /* Attestation report */
- KVM_SEV_GET_ATTESTATION_REPORT,
- /* Guest Migration Extension */
- KVM_SEV_SEND_CANCEL,
-
- KVM_SEV_NR_MAX,
-};
-
-struct kvm_sev_cmd {
- __u32 id;
- __u64 data;
- __u32 error;
- __u32 sev_fd;
-};
-
-struct kvm_sev_launch_start {
- __u32 handle;
- __u32 policy;
- __u64 dh_uaddr;
- __u32 dh_len;
- __u64 session_uaddr;
- __u32 session_len;
-};
-
-struct kvm_sev_launch_update_data {
- __u64 uaddr;
- __u32 len;
-};
-
-
-struct kvm_sev_launch_secret {
- __u64 hdr_uaddr;
- __u32 hdr_len;
- __u64 guest_uaddr;
- __u32 guest_len;
- __u64 trans_uaddr;
- __u32 trans_len;
-};
-
-struct kvm_sev_launch_measure {
- __u64 uaddr;
- __u32 len;
-};
-
-struct kvm_sev_guest_status {
- __u32 handle;
- __u32 policy;
- __u32 state;
-};
-
-struct kvm_sev_dbg {
- __u64 src_uaddr;
- __u64 dst_uaddr;
- __u32 len;
-};
-
-struct kvm_sev_attestation_report {
- __u8 mnonce[16];
- __u64 uaddr;
- __u32 len;
-};
-
-struct kvm_sev_send_start {
- __u32 policy;
- __u64 pdh_cert_uaddr;
- __u32 pdh_cert_len;
- __u64 plat_certs_uaddr;
- __u32 plat_certs_len;
- __u64 amd_certs_uaddr;
- __u32 amd_certs_len;
- __u64 session_uaddr;
- __u32 session_len;
-};
-
-struct kvm_sev_send_update_data {
- __u64 hdr_uaddr;
- __u32 hdr_len;
- __u64 guest_uaddr;
- __u32 guest_len;
- __u64 trans_uaddr;
- __u32 trans_len;
-};
-
-struct kvm_sev_receive_start {
- __u32 handle;
- __u32 policy;
- __u64 pdh_uaddr;
- __u32 pdh_len;
- __u64 session_uaddr;
- __u32 session_len;
-};
-
-struct kvm_sev_receive_update_data {
- __u64 hdr_uaddr;
- __u32 hdr_len;
- __u64 guest_uaddr;
- __u32 guest_len;
- __u64 trans_uaddr;
- __u32 trans_len;
-};
-
-#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
-#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
-#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
-
-struct kvm_assigned_pci_dev {
- __u32 assigned_dev_id;
- __u32 busnr;
- __u32 devfn;
- __u32 flags;
- __u32 segnr;
- union {
- __u32 reserved[11];
- };
-};
-
-#define KVM_DEV_IRQ_HOST_INTX (1 << 0)
-#define KVM_DEV_IRQ_HOST_MSI (1 << 1)
-#define KVM_DEV_IRQ_HOST_MSIX (1 << 2)
-
-#define KVM_DEV_IRQ_GUEST_INTX (1 << 8)
-#define KVM_DEV_IRQ_GUEST_MSI (1 << 9)
-#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10)
-
-#define KVM_DEV_IRQ_HOST_MASK 0x00ff
-#define KVM_DEV_IRQ_GUEST_MASK 0xff00
-
-struct kvm_assigned_irq {
- __u32 assigned_dev_id;
- __u32 host_irq; /* ignored (legacy field) */
- __u32 guest_irq;
- __u32 flags;
- union {
- __u32 reserved[12];
- };
-};
-
-struct kvm_assigned_msix_nr {
- __u32 assigned_dev_id;
- __u16 entry_nr;
- __u16 padding;
-};
-
-#define KVM_MAX_MSIX_PER_DEV 256
-struct kvm_assigned_msix_entry {
- __u32 assigned_dev_id;
- __u32 gsi;
- __u16 entry; /* The index of entry in the MSI-X table */
- __u16 padding[3];
-};
-
-#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
-#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
-
-/* Available with KVM_CAP_ARM_USER_IRQ */
-
-/* Bits for run->s.regs.device_irq_level */
-#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
-#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
-#define KVM_ARM_DEV_PMU (1 << 2)
-
-struct kvm_hyperv_eventfd {
- __u32 conn_id;
- __s32 fd;
- __u32 flags;
- __u32 padding[3];
-};
-
-#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
-#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
-
#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
@@ -2180,33 +1528,6 @@ struct kvm_stats_desc {
/* Available with KVM_CAP_S390_ZPCI_OP */
#define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op)
-struct kvm_s390_zpci_op {
- /* in */
- __u32 fh; /* target device */
- __u8 op; /* operation to perform */
- __u8 pad[3];
- union {
- /* for KVM_S390_ZPCIOP_REG_AEN */
- struct {
- __u64 ibv; /* Guest addr of interrupt bit vector */
- __u64 sb; /* Guest addr of summary bit */
- __u32 flags;
- __u32 noi; /* Number of interrupts */
- __u8 isc; /* Guest interrupt subclass */
- __u8 sbo; /* Offset of guest summary bit vector */
- __u16 pad;
- } reg_aen;
- __u64 reserved[8];
- } u;
-};
-
-/* types for kvm_s390_zpci_op->op */
-#define KVM_S390_ZPCIOP_REG_AEN 0
-#define KVM_S390_ZPCIOP_DEREG_AEN 1
-
-/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
-#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
-
/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
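
With the #ifdef gone, KVM_CAP_SET_GUEST_DEBUG is always visible to tools; whether the running kernel honors it remains a runtime question, answered the usual way (a sketch):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int kvm_supports_guest_debug(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int ok = kvm >= 0 &&
                     ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_SET_GUEST_DEBUG) > 0;

            if (kvm >= 0)
                    close(kvm);
            return ok;      /* KVM_CHECK_EXTENSION returns > 0 when supported */
    }
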
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index d5b9cfbd9cea..628d46a0da92 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -142,7 +142,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 16)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 17)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -416,7 +416,7 @@ struct snd_pcm_hw_params {
unsigned int rmask; /* W: requested masks */
unsigned int cmask; /* R: changed masks */
unsigned int info; /* R: Info flags for returned setup */
- unsigned int msbits; /* R: used most significant bits */
+ unsigned int msbits; /* R: used most significant bits (in sample bit-width) */
unsigned int rate_num; /* R: rate numerator */
unsigned int rate_den; /* R: rate denominator */
snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index efab29b8935b..a2061fcd612d 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -498,7 +498,7 @@ struct bpf_struct_ops {
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
-#define ARENA_SEC ".arena.1"
+#define ARENA_SEC ".addr_space.1"
enum libbpf_map_type {
LIBBPF_MAP_UNSPEC,
@@ -1650,6 +1650,10 @@ static int sys_memfd_create(const char *name, unsigned flags)
return syscall(__NR_memfd_create, name, flags);
}
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 0x0001U
+#endif
+
static int create_placeholder_fd(void)
{
int fd;
@@ -5352,8 +5356,8 @@ retry:
goto err_out;
}
if (map->def.type == BPF_MAP_TYPE_ARENA) {
- map->mmaped = mmap((void *)map->map_extra, bpf_map_mmap_sz(map),
- PROT_READ | PROT_WRITE,
+ map->mmaped = mmap((void *)(long)map->map_extra,
+ bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
map->fd, 0);
if (map->mmaped == MAP_FAILED) {
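
The added (long) cast matters on 32-bit builds, where converting a __u64 straight to a pointer draws a cast-to-pointer-of-different-size warning; a sketch of the distinction, not libbpf code:

    __u64 addr = 0x100000000ULL;
    void *p;

    p = (void *)addr;       /* 32-bit: -Wint-to-pointer-cast warning */
    p = (void *)(long)addr; /* explicit narrowing to the native word, no warning */
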
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index 5fa7957f6e0f..25810e18b0a7 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -182,6 +182,7 @@ class NlMsg:
self.done = 1
extack_off = 20
elif self.nl_type == Netlink.NLMSG_DONE:
+ self.error = struct.unpack("i", self.raw[0:4])[0]
self.done = 1
extack_off = 4
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index 6b7eb2d2aaf1..a451cbfbd781 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -228,8 +228,11 @@ class Type(SpecAttr):
presence = ''
for i in range(0, len(ref)):
presence = f"{var}->{'.'.join(ref[:i] + [''])}_present.{ref[i]}"
- if self.presence_type() == 'bit':
- code.append(presence + ' = 1;')
+ # Every layer below the last is a nest, so we know it uses bit presence;
+ # the last layer is "self" and may be a complex type
+ if i == len(ref) - 1 and self.presence_type() != 'bit':
+ continue
+ code.append(presence + ' = 1;')
code += self._setter_lines(ri, member, presence)
func_name = f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}"
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 0b10ad008668..0a33d9195b7a 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -585,7 +585,7 @@ static int add_dead_ends(struct objtool_file *file)
struct section *rsec;
struct reloc *reloc;
struct instruction *insn;
- unsigned long offset;
+ uint64_t offset;
/*
* Check for manually annotated dead ends.
diff --git a/tools/perf/arch/riscv/util/header.c b/tools/perf/arch/riscv/util/header.c
index 4a41856938a8..1b29030021ee 100644
--- a/tools/perf/arch/riscv/util/header.c
+++ b/tools/perf/arch/riscv/util/header.c
@@ -41,7 +41,7 @@ static char *_get_cpuid(void)
char *mimpid = NULL;
char *cpuid = NULL;
int read;
- unsigned long line_sz;
+ size_t line_sz;
FILE *cpuinfo;
cpuinfo = fopen(CPUINFO, "r");
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index ec5e21932876..4790c735599b 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -970,7 +970,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
if (dso->annotate_warned)
return -1;
- if (not_annotated) {
+ if (not_annotated || !sym->annotate2) {
err = symbol__annotate2(ms, evsel, &browser.arch);
if (err) {
char msg[BUFSIZ];
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index ac002d907d81..50ca92255ff6 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -2461,6 +2461,9 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
if (parch)
*parch = arch;
+ if (!list_empty(&notes->src->source))
+ return 0;
+
args.arch = arch;
args.ms = *ms;
if (annotate_opts.full_addr)
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index fb54bd38e7d0..d931a898c434 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -284,6 +284,7 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
struct task_struct *curr;
struct mm_struct___old *mm_old;
struct mm_struct___new *mm_new;
+ struct sighand_struct *sighand;
switch (flags) {
case LCB_F_READ: /* rwsem */
@@ -305,7 +306,9 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
break;
case LCB_F_SPIN: /* spinlock */
curr = bpf_get_current_task_btf();
- if (&curr->sighand->siglock == (void *)lock)
+ sighand = curr->sighand;
+
+ if (sighand && &sighand->siglock == (void *)lock)
return LCD_F_SIGHAND_LOCK;
break;
default:
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 8f08c3fd498d..0d3672e5d9ed 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -67,6 +67,10 @@ The column name "all" can be used to enable all disabled-by-default built-in cou
.PP
\fB--quiet\fP Do not decode and print the system configuration header information.
.PP
+\fB--no-msr\fP Disable all uses of the MSR driver.
+.PP
+\fB--no-perf\fP Disable all uses of the perf API.
+.PP
\fB--interval seconds\fP overrides the default 5.0 second measurement interval.
.PP
\fB--num_iterations num\fP number of the measurement iterations.
@@ -125,9 +129,17 @@ The system configuration dump (if --quiet is not used) is followed by statistics
.PP
\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
.PP
-\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
+\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms or /sys/class/drm/card0/gt/gt0/rc6_residency_ms or /sys/class/drm/card0/device/tile0/gtN/gtidle/idle_residency_ms depending on the graphics driver being used.
.PP
-\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
+\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz or /sys/class/drm/card0/gt_cur_freq_mhz or /sys/class/drm/card0/gt/gt0/rps_cur_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/cur_freq depending on the graphics driver being used.
+.PP
+\fBGFXAMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz or /sys/class/drm/card0/gt_act_freq_mhz or /sys/class/drm/card0/gt/gt0/rps_act_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/act_freq depending on the graphics driver being used.
+.PP
+\fBSAM%mc6\fP The percentage of time the SA Media is in the "module C6" state, mc6, during the measurement interval. From /sys/class/drm/card0/gt/gt1/rc6_residency_ms or /sys/class/drm/card0/device/tile0/gtN/gtidle/idle_residency_ms depending on the graphics driver being used.
+.PP
+\fBSAMMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/drm/card0/gt/gt1/rps_cur_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/cur_freq depending on the graphics driver being used.
+.PP
+\fBSAMAMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/drm/card0/gt/gt1/rps_act_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/act_freq depending on the graphics driver being used.
.PP
\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters.
.PP
@@ -370,7 +382,7 @@ below the processor's base frequency.
Busy% = MPERF_delta/TSC_delta
-Bzy_MHz = TSC_delta/APERF_delta/MPERF_delta/measurement_interval
+Bzy_MHz = TSC_delta*APERF_delta/MPERF_delta/measurement_interval
Note that these calculations depend on TSC_delta, so they
are not reliable during intervals when TSC_MHz is not running at the base frequency.
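
As a worked sketch of the corrected formula, with deltas prescaled to Mcycles over a 1-second interval so the result lands in MHz (made-up numbers):

    TSC_delta   = 2400   (2.4 GHz base clock)
    APERF_delta =  500
    MPERF_delta =  250

    Bzy_MHz = 2400 * 500 / 250 / 1 = 4800

i.e. the core averaged twice its base frequency during the time it was busy.
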
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 7a334377f92b..98256468e248 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -3,7 +3,7 @@
* turbostat -- show CPU frequency and C-state residency
* on modern Intel and AMD processors.
*
- * Copyright (c) 2023 Intel Corporation.
+ * Copyright (c) 2024 Intel Corporation.
* Len Brown <len.brown@intel.com>
*/
@@ -36,6 +36,8 @@
#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <stdbool.h>
+#include <assert.h>
+#include <linux/kernel.h>
#define UNUSED(x) (void)(x)
@@ -53,9 +55,13 @@
#define NAME_BYTES 20
#define PATH_BYTES 128
+#define MAX_NOFILE 0x8000
+
enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE };
enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC };
enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT };
+enum amperf_source { AMPERF_SOURCE_PERF, AMPERF_SOURCE_MSR };
+enum rapl_source { RAPL_SOURCE_NONE, RAPL_SOURCE_PERF, RAPL_SOURCE_MSR };
struct msr_counter {
unsigned int msr_num;
@@ -127,6 +133,9 @@ struct msr_counter bic[] = {
{ 0x0, "IPC", "", 0, 0, 0, NULL, 0 },
{ 0x0, "CoreThr", "", 0, 0, 0, NULL, 0 },
{ 0x0, "UncMHz", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "SAM%mc6", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "SAMMHz", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "SAMAMHz", "", 0, 0, 0, NULL, 0 },
};
#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -185,11 +194,14 @@ struct msr_counter bic[] = {
#define BIC_IPC (1ULL << 52)
#define BIC_CORE_THROT_CNT (1ULL << 53)
#define BIC_UNCORE_MHZ (1ULL << 54)
+#define BIC_SAM_mc6 (1ULL << 55)
+#define BIC_SAMMHz (1ULL << 56)
+#define BIC_SAMACTMHz (1ULL << 57)
#define BIC_TOPOLOGY (BIC_Package | BIC_Node | BIC_CoreCnt | BIC_PkgCnt | BIC_Core | BIC_CPU | BIC_Die )
#define BIC_THERMAL_PWR ( BIC_CoreTmp | BIC_PkgTmp | BIC_PkgWatt | BIC_CorWatt | BIC_GFXWatt | BIC_RAMWatt | BIC_PKG__ | BIC_RAM__)
-#define BIC_FREQUENCY ( BIC_Avg_MHz | BIC_Busy | BIC_Bzy_MHz | BIC_TSC_MHz | BIC_GFXMHz | BIC_GFXACTMHz | BIC_UNCORE_MHZ)
-#define BIC_IDLE ( BIC_sysfs | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX)
+#define BIC_FREQUENCY (BIC_Avg_MHz | BIC_Busy | BIC_Bzy_MHz | BIC_TSC_MHz | BIC_GFXMHz | BIC_GFXACTMHz | BIC_SAMMHz | BIC_SAMACTMHz | BIC_UNCORE_MHZ)
+#define BIC_IDLE (BIC_sysfs | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX | BIC_SAM_mc6)
#define BIC_OTHER ( BIC_IRQ | BIC_SMI | BIC_ThreadC | BIC_CoreTmp | BIC_IPC)
#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
@@ -204,10 +216,13 @@ unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC
#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
#define BIC_IS_ENABLED(COUNTER_BIT) (bic_enabled & COUNTER_BIT)
+struct amperf_group_fd;
+
char *proc_stat = "/proc/stat";
FILE *outf;
int *fd_percpu;
int *fd_instr_count_percpu;
+struct amperf_group_fd *fd_amperf_percpu; /* File descriptors for perf group with APERF and MPERF counters. */
struct timeval interval_tv = { 5, 0 };
struct timespec interval_ts = { 5, 0 };
@@ -242,11 +257,8 @@ char *output_buffer, *outp;
unsigned int do_dts;
unsigned int do_ptm;
unsigned int do_ipc;
-unsigned long long gfx_cur_rc6_ms;
unsigned long long cpuidle_cur_cpu_lpi_us;
unsigned long long cpuidle_cur_sys_lpi_us;
-unsigned int gfx_cur_mhz;
-unsigned int gfx_act_mhz;
unsigned int tj_max;
unsigned int tj_max_override;
double rapl_power_units, rapl_time_units;
@@ -263,6 +275,28 @@ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
unsigned int first_counter_read = 1;
int ignore_stdin;
+bool no_msr;
+bool no_perf;
+enum amperf_source amperf_source;
+
+enum gfx_sysfs_idx {
+ GFX_rc6,
+ GFX_MHz,
+ GFX_ACTMHz,
+ SAM_mc6,
+ SAM_MHz,
+ SAM_ACTMHz,
+ GFX_MAX
+};
+
+struct gfx_sysfs_info {
+ const char *path;
+ FILE *fp;
+ unsigned int val;
+ unsigned long long val_ull;
+};
+
+static struct gfx_sysfs_info gfx_info[GFX_MAX];
int get_msr(int cpu, off_t offset, unsigned long long *msr);
@@ -652,6 +686,7 @@ static const struct platform_features icx_features = {
.bclk_freq = BCLK_100MHZ,
.supported_cstates = CC1 | CC6 | PC2 | PC6,
.cst_limit = CST_LIMIT_ICX,
+ .has_msr_core_c1_res = 1,
.has_irtl_msrs = 1,
.has_cst_prewake_bit = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
@@ -948,6 +983,175 @@ size_t cpu_present_setsize, cpu_effective_setsize, cpu_allowed_setsize, cpu_affi
#define MAX_ADDED_THREAD_COUNTERS 24
#define BITMASK_SIZE 32
+/* Indexes used to map data read from perf and MSRs into global variables */
+enum rapl_rci_index {
+ RAPL_RCI_INDEX_ENERGY_PKG = 0,
+ RAPL_RCI_INDEX_ENERGY_CORES = 1,
+ RAPL_RCI_INDEX_DRAM = 2,
+ RAPL_RCI_INDEX_GFX = 3,
+ RAPL_RCI_INDEX_PKG_PERF_STATUS = 4,
+ RAPL_RCI_INDEX_DRAM_PERF_STATUS = 5,
+ RAPL_RCI_INDEX_CORE_ENERGY = 6,
+ NUM_RAPL_COUNTERS,
+};
+
+enum rapl_unit {
+ RAPL_UNIT_INVALID,
+ RAPL_UNIT_JOULES,
+ RAPL_UNIT_WATTS,
+};
+
+struct rapl_counter_info_t {
+ unsigned long long data[NUM_RAPL_COUNTERS];
+ enum rapl_source source[NUM_RAPL_COUNTERS];
+ unsigned long long flags[NUM_RAPL_COUNTERS];
+ double scale[NUM_RAPL_COUNTERS];
+ enum rapl_unit unit[NUM_RAPL_COUNTERS];
+
+ union {
+ /* Active when source == RAPL_SOURCE_MSR */
+ struct {
+ unsigned long long msr[NUM_RAPL_COUNTERS];
+ unsigned long long msr_mask[NUM_RAPL_COUNTERS];
+ int msr_shift[NUM_RAPL_COUNTERS];
+ };
+ };
+
+ int fd_perf;
+};
+
+/* struct rapl_counter_info_t for each RAPL domain */
+struct rapl_counter_info_t *rapl_counter_info_perdomain;
+
+#define RAPL_COUNTER_FLAG_USE_MSR_SUM (1u << 1)
+
+struct rapl_counter_arch_info {
+ int feature_mask; /* Mask for testing if the counter is supported on host */
+ const char *perf_subsys;
+ const char *perf_name;
+ unsigned long long msr;
+ unsigned long long msr_mask;
+ int msr_shift; /* Positive means shift right, negative means shift left */
+ double *platform_rapl_msr_scale; /* Scale applied to values read by MSR (platform dependent, filled at runtime) */
+ unsigned int rci_index; /* Maps data from perf counters to global variables */
+ unsigned long long bic;
+ double compat_scale; /* Some counters require constant scaling to be in the same range as other, similar ones */
+ unsigned long long flags;
+};
+
+static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
+ {
+ .feature_mask = RAPL_PKG,
+ .perf_subsys = "power",
+ .perf_name = "energy-pkg",
+ .msr = MSR_PKG_ENERGY_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_ENERGY_PKG,
+ .bic = BIC_PkgWatt | BIC_Pkg_J,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_AMD_F17H,
+ .perf_subsys = "power",
+ .perf_name = "energy-pkg",
+ .msr = MSR_PKG_ENERGY_STAT,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_ENERGY_PKG,
+ .bic = BIC_PkgWatt | BIC_Pkg_J,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_CORE_ENERGY_STATUS,
+ .perf_subsys = "power",
+ .perf_name = "energy-cores",
+ .msr = MSR_PP0_ENERGY_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_ENERGY_CORES,
+ .bic = BIC_CorWatt | BIC_Cor_J,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_DRAM,
+ .perf_subsys = "power",
+ .perf_name = "energy-ram",
+ .msr = MSR_DRAM_ENERGY_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_dram_energy_units,
+ .rci_index = RAPL_RCI_INDEX_DRAM,
+ .bic = BIC_RAMWatt | BIC_RAM_J,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_GFX,
+ .perf_subsys = "power",
+ .perf_name = "energy-gpu",
+ .msr = MSR_PP1_ENERGY_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_GFX,
+ .bic = BIC_GFXWatt | BIC_GFX_J,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_PKG_PERF_STATUS,
+ .perf_subsys = NULL,
+ .perf_name = NULL,
+ .msr = MSR_PKG_PERF_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_time_units,
+ .rci_index = RAPL_RCI_INDEX_PKG_PERF_STATUS,
+ .bic = BIC_PKG__,
+ .compat_scale = 100.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_DRAM_PERF_STATUS,
+ .perf_subsys = NULL,
+ .perf_name = NULL,
+ .msr = MSR_DRAM_PERF_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_time_units,
+ .rci_index = RAPL_RCI_INDEX_DRAM_PERF_STATUS,
+ .bic = BIC_RAM__,
+ .compat_scale = 100.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_AMD_F17H,
+ .perf_subsys = NULL,
+ .perf_name = NULL,
+ .msr = MSR_CORE_ENERGY_STAT,
+ .msr_mask = 0xFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_CORE_ENERGY,
+ .bic = BIC_CorWatt | BIC_Cor_J,
+ .compat_scale = 1.0,
+ .flags = 0,
+ },
+};
+
+struct rapl_counter {
+ unsigned long long raw_value;
+ enum rapl_unit unit;
+ double scale;
+};
+
struct thread_data {
struct timeval tv_begin;
struct timeval tv_end;
@@ -974,7 +1178,7 @@ struct core_data {
unsigned long long c7;
unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */
unsigned int core_temp_c;
- unsigned int core_energy; /* MSR_CORE_ENERGY_STAT */
+ struct rapl_counter core_energy; /* MSR_CORE_ENERGY_STAT */
unsigned int core_id;
unsigned long long core_throt_cnt;
unsigned long long counter[MAX_ADDED_COUNTERS];
@@ -989,8 +1193,8 @@ struct pkg_data {
unsigned long long pc8;
unsigned long long pc9;
unsigned long long pc10;
- unsigned long long cpu_lpi;
- unsigned long long sys_lpi;
+ long long cpu_lpi;
+ long long sys_lpi;
unsigned long long pkg_wtd_core_c0;
unsigned long long pkg_any_core_c0;
unsigned long long pkg_any_gfxe_c0;
@@ -998,13 +1202,16 @@ struct pkg_data {
long long gfx_rc6_ms;
unsigned int gfx_mhz;
unsigned int gfx_act_mhz;
+ long long sam_mc6_ms;
+ unsigned int sam_mhz;
+ unsigned int sam_act_mhz;
unsigned int package_id;
- unsigned long long energy_pkg; /* MSR_PKG_ENERGY_STATUS */
- unsigned long long energy_dram; /* MSR_DRAM_ENERGY_STATUS */
- unsigned long long energy_cores; /* MSR_PP0_ENERGY_STATUS */
- unsigned long long energy_gfx; /* MSR_PP1_ENERGY_STATUS */
- unsigned long long rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
- unsigned long long rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
+ struct rapl_counter energy_pkg; /* MSR_PKG_ENERGY_STATUS */
+ struct rapl_counter energy_dram; /* MSR_DRAM_ENERGY_STATUS */
+ struct rapl_counter energy_cores; /* MSR_PP0_ENERGY_STATUS */
+ struct rapl_counter energy_gfx; /* MSR_PP1_ENERGY_STATUS */
+ struct rapl_counter rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
+ struct rapl_counter rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
unsigned int pkg_temp_c;
unsigned int uncore_mhz;
unsigned long long counter[MAX_ADDED_COUNTERS];
@@ -1150,6 +1357,38 @@ struct sys_counters {
struct msr_counter *pp;
} sys;
+void free_sys_counters(void)
+{
+ struct msr_counter *p = sys.tp, *pnext = NULL;
+
+ while (p) {
+ pnext = p->next;
+ free(p);
+ p = pnext;
+ }
+
+ p = sys.cp, pnext = NULL;
+ while (p) {
+ pnext = p->next;
+ free(p);
+ p = pnext;
+ }
+
+ p = sys.pp, pnext = NULL;
+ while (p) {
+ pnext = p->next;
+ free(p);
+ p = pnext;
+ }
+
+ sys.added_thread_counters = 0;
+ sys.added_core_counters = 0;
+ sys.added_package_counters = 0;
+ sys.tp = NULL;
+ sys.cp = NULL;
+ sys.pp = NULL;
+}
+
struct system_summary {
struct thread_data threads;
struct core_data cores;
@@ -1280,34 +1519,60 @@ int get_msr_fd(int cpu)
sprintf(pathname, "/dev/cpu/%d/msr", cpu);
fd = open(pathname, O_RDONLY);
if (fd < 0)
- err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
+ err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, "
+ "or run with --no-msr, or run as root", pathname);
fd_percpu[cpu] = fd;
return fd;
}
+static void bic_disable_msr_access(void)
+{
+ const unsigned long bic_msrs =
+ BIC_SMI |
+ BIC_CPU_c1 |
+ BIC_CPU_c3 |
+ BIC_CPU_c6 |
+ BIC_CPU_c7 |
+ BIC_Mod_c6 |
+ BIC_CoreTmp |
+ BIC_Totl_c0 |
+ BIC_Any_c0 |
+ BIC_GFX_c0 |
+ BIC_CPUGFX |
+ BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_PkgTmp;
+
+ bic_enabled &= ~bic_msrs;
+
+ free_sys_counters();
+}
+
static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
{
+ assert(!no_perf);
+
return syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
}
-static int perf_instr_count_open(int cpu_num)
+static long open_perf_counter(int cpu, unsigned int type, unsigned int config, int group_fd, __u64 read_format)
{
- struct perf_event_attr pea;
- int fd;
+ struct perf_event_attr attr;
+ const pid_t pid = -1;
+ const unsigned long flags = 0;
- memset(&pea, 0, sizeof(struct perf_event_attr));
- pea.type = PERF_TYPE_HARDWARE;
- pea.size = sizeof(struct perf_event_attr);
- pea.config = PERF_COUNT_HW_INSTRUCTIONS;
+ assert(!no_perf);
- /* counter for cpu_num, including user + kernel and all processes */
- fd = perf_event_open(&pea, -1, cpu_num, -1, 0);
- if (fd == -1) {
- warnx("capget(CAP_PERFMON) failed, try \"# setcap cap_sys_admin=ep %s\"", progname);
- BIC_NOT_PRESENT(BIC_IPC);
- }
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+
+ attr.type = type;
+ attr.size = sizeof(struct perf_event_attr);
+ attr.config = config;
+ attr.disabled = 0;
+ attr.sample_type = PERF_SAMPLE_IDENTIFIER;
+ attr.read_format = read_format;
+
+ const int fd = perf_event_open(&attr, pid, cpu, group_fd, flags);
return fd;
}
@@ -1317,7 +1582,7 @@ int get_instr_count_fd(int cpu)
if (fd_instr_count_percpu[cpu])
return fd_instr_count_percpu[cpu];
- fd_instr_count_percpu[cpu] = perf_instr_count_open(cpu);
+ fd_instr_count_percpu[cpu] = open_perf_counter(cpu, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS, -1, 0);
return fd_instr_count_percpu[cpu];
}
@@ -1326,6 +1591,8 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
{
ssize_t retval;
+ assert(!no_msr);
+
retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);
if (retval != sizeof *msr)
@@ -1334,6 +1601,21 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
return 0;
}
+int probe_msr(int cpu, off_t offset)
+{
+ ssize_t retval;
+ unsigned long long dummy;
+
+ assert(!no_msr);
+
+ retval = pread(get_msr_fd(cpu), &dummy, sizeof(dummy), offset);
+
+ if (retval != sizeof(dummy))
+ return 1;
+
+ return 0;
+}
+
#define MAX_DEFERRED 16
char *deferred_add_names[MAX_DEFERRED];
char *deferred_skip_names[MAX_DEFERRED];
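
probe_msr() gives feature-detection code a cheap existence check before a counter is enabled; a hypothetical use, with base_cpu and the msr_ok flag standing in for turbostat's real probing logic:

    if (!no_msr && probe_msr(base_cpu, MSR_PKG_ENERGY_STATUS) == 0)
            msr_ok = 1;     /* MSR readable: the MSR path is usable */
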
@@ -1369,6 +1651,8 @@ void help(void)
" Override default 5-second measurement interval\n"
" -J, --Joules displays energy in Joules instead of Watts\n"
" -l, --list list column headers only\n"
+ " -M, --no-msr Disable all uses of the MSR driver\n"
+ " -P, --no-perf Disable all uses of the perf API\n"
" -n, --num_iterations num\n"
" number of the measurement iterations\n"
" -N, --header_iterations num\n"
@@ -1573,6 +1857,15 @@ void print_header(char *delim)
if (DO_BIC(BIC_GFXACTMHz))
outp += sprintf(outp, "%sGFXAMHz", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_SAM_mc6))
+ outp += sprintf(outp, "%sSAM%%mc6", (printed++ ? delim : ""));
+
+ if (DO_BIC(BIC_SAMMHz))
+ outp += sprintf(outp, "%sSAMMHz", (printed++ ? delim : ""));
+
+ if (DO_BIC(BIC_SAMACTMHz))
+ outp += sprintf(outp, "%sSAMAMHz", (printed++ ? delim : ""));
+
if (DO_BIC(BIC_Totl_c0))
outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : ""));
if (DO_BIC(BIC_Any_c0))
@@ -1671,26 +1964,35 @@ int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p
outp += sprintf(outp, "SMI: %d\n", t->smi_count);
for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
- outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, t->counter[i]);
+ outp +=
+ sprintf(outp, "tADDED [%d] %8s msr0x%x: %08llX %s\n", i, mp->name, mp->msr_num,
+ t->counter[i], mp->path);
}
}
- if (c) {
+ if (c && is_cpu_first_thread_in_core(t, c, p)) {
outp += sprintf(outp, "core: %d\n", c->core_id);
outp += sprintf(outp, "c3: %016llX\n", c->c3);
outp += sprintf(outp, "c6: %016llX\n", c->c6);
outp += sprintf(outp, "c7: %016llX\n", c->c7);
outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
outp += sprintf(outp, "cpu_throt_count: %016llX\n", c->core_throt_cnt);
- outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
+
+ const unsigned long long energy_value = c->core_energy.raw_value * c->core_energy.scale;
+ const double energy_scale = c->core_energy.scale;
+
+ if (c->core_energy.unit == RAPL_UNIT_JOULES)
+ outp += sprintf(outp, "Joules: %0llX (scale: %lf)\n", energy_value, energy_scale);
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
- outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, c->counter[i]);
+ outp +=
+ sprintf(outp, "cADDED [%d] %8s msr0x%x: %08llX %s\n", i, mp->name, mp->msr_num,
+ c->counter[i], mp->path);
}
outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us);
}
- if (p) {
+ if (p && is_cpu_first_core_in_package(t, c, p)) {
outp += sprintf(outp, "package: %d\n", p->package_id);
outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
@@ -1710,16 +2012,18 @@ int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p
outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
- outp += sprintf(outp, "Joules PKG: %0llX\n", p->energy_pkg);
- outp += sprintf(outp, "Joules COR: %0llX\n", p->energy_cores);
- outp += sprintf(outp, "Joules GFX: %0llX\n", p->energy_gfx);
- outp += sprintf(outp, "Joules RAM: %0llX\n", p->energy_dram);
- outp += sprintf(outp, "Throttle PKG: %0llX\n", p->rapl_pkg_perf_status);
- outp += sprintf(outp, "Throttle RAM: %0llX\n", p->rapl_dram_perf_status);
+ outp += sprintf(outp, "Joules PKG: %0llX\n", p->energy_pkg.raw_value);
+ outp += sprintf(outp, "Joules COR: %0llX\n", p->energy_cores.raw_value);
+ outp += sprintf(outp, "Joules GFX: %0llX\n", p->energy_gfx.raw_value);
+ outp += sprintf(outp, "Joules RAM: %0llX\n", p->energy_dram.raw_value);
+ outp += sprintf(outp, "Throttle PKG: %0llX\n", p->rapl_pkg_perf_status.raw_value);
+ outp += sprintf(outp, "Throttle RAM: %0llX\n", p->rapl_dram_perf_status.raw_value);
outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
- outp += sprintf(outp, "pADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, p->counter[i]);
+ outp +=
+ sprintf(outp, "pADDED [%d] %8s msr0x%x: %08llX %s\n", i, mp->name, mp->msr_num,
+ p->counter[i], mp->path);
}
}
@@ -1728,6 +2032,23 @@ int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p
return 0;
}
+double rapl_counter_get_value(const struct rapl_counter *c, enum rapl_unit desired_unit, double interval)
+{
+ assert(desired_unit != RAPL_UNIT_INVALID);
+
+ /*
+ * For now we don't expect anything other than joules,
+ * so just simplify the logic.
+ */
+ assert(c->unit == RAPL_UNIT_JOULES);
+
+ const double scaled = c->raw_value * c->scale;
+
+ if (desired_unit == RAPL_UNIT_WATTS)
+ return scaled / interval;
+ return scaled;
+}
+
/*
* column formatting convention & formats
*/
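
The helper is used below in format_counters() to render one accumulated counter either way; a sketch matching those call sites:

    double watts  = rapl_counter_get_value(&p->energy_pkg, RAPL_UNIT_WATTS, interval_float);
    double joules = rapl_counter_get_value(&p->energy_pkg, RAPL_UNIT_JOULES, interval_float);
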
@@ -1921,9 +2242,11 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
if (DO_BIC(BIC_CorWatt) && platform->has_per_core_rapl)
outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
+ sprintf(outp, fmt8, (printed++ ? delim : ""),
+ rapl_counter_get_value(&c->core_energy, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_Cor_J) && platform->has_per_core_rapl)
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
+ rapl_counter_get_value(&c->core_energy, RAPL_UNIT_JOULES, interval_float));
/* print per-package data only for 1st core in package */
if (!is_cpu_first_core_in_package(t, c, p))
@@ -1951,6 +2274,24 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
if (DO_BIC(BIC_GFXACTMHz))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_act_mhz);
+ /* SAMmc6 */
+ if (DO_BIC(BIC_SAM_mc6)) {
+ if (p->sam_mc6_ms == -1) { /* detect GFX counter reset */
+ outp += sprintf(outp, "%s**.**", (printed++ ? delim : ""));
+ } else {
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
+ p->sam_mc6_ms / 10.0 / interval_float);
+ }
+ }
+
+ /* SAMMHz */
+ if (DO_BIC(BIC_SAMMHz))
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->sam_mhz);
+
+ /* SAMACTMHz */
+ if (DO_BIC(BIC_SAMACTMHz))
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->sam_act_mhz);
+
/* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
if (DO_BIC(BIC_Totl_c0))
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0 / tsc);
@@ -1976,43 +2317,59 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
if (DO_BIC(BIC_Pkgpc10))
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10 / tsc);
- if (DO_BIC(BIC_CPU_LPI))
- outp +=
- sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->cpu_lpi / 1000000.0 / interval_float);
- if (DO_BIC(BIC_SYS_LPI))
- outp +=
- sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
+ if (DO_BIC(BIC_CPU_LPI)) {
+ if (p->cpu_lpi >= 0)
+ outp +=
+ sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
+ 100.0 * p->cpu_lpi / 1000000.0 / interval_float);
+ else
+ outp += sprintf(outp, "%s(neg)", (printed++ ? delim : ""));
+ }
+ if (DO_BIC(BIC_SYS_LPI)) {
+ if (p->sys_lpi >= 0)
+ outp +=
+ sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
+ 100.0 * p->sys_lpi / 1000000.0 / interval_float);
+ else
+ outp += sprintf(outp, "%s(neg)", (printed++ ? delim : ""));
+ }
if (DO_BIC(BIC_PkgWatt))
outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
-
+ sprintf(outp, fmt8, (printed++ ? delim : ""),
+ rapl_counter_get_value(&p->energy_pkg, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_CorWatt) && !platform->has_per_core_rapl)
outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
+ sprintf(outp, fmt8, (printed++ ? delim : ""),
+ rapl_counter_get_value(&p->energy_cores, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_GFXWatt))
outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
+ sprintf(outp, fmt8, (printed++ ? delim : ""),
+ rapl_counter_get_value(&p->energy_gfx, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_RAMWatt))
outp +=
sprintf(outp, fmt8, (printed++ ? delim : ""),
- p->energy_dram * rapl_dram_energy_units / interval_float);
+ rapl_counter_get_value(&p->energy_dram, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_Pkg_J))
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
+ rapl_counter_get_value(&p->energy_pkg, RAPL_UNIT_JOULES, interval_float));
if (DO_BIC(BIC_Cor_J) && !platform->has_per_core_rapl)
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
+ rapl_counter_get_value(&p->energy_cores, RAPL_UNIT_JOULES, interval_float));
if (DO_BIC(BIC_GFX_J))
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
+ rapl_counter_get_value(&p->energy_gfx, RAPL_UNIT_JOULES, interval_float));
if (DO_BIC(BIC_RAM_J))
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units);
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
+ rapl_counter_get_value(&p->energy_dram, RAPL_UNIT_JOULES, interval_float));
if (DO_BIC(BIC_PKG__))
outp +=
sprintf(outp, fmt8, (printed++ ? delim : ""),
- 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
+ rapl_counter_get_value(&p->rapl_pkg_perf_status, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_RAM__))
outp +=
sprintf(outp, fmt8, (printed++ ? delim : ""),
- 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+ rapl_counter_get_value(&p->rapl_dram_perf_status, RAPL_UNIT_WATTS, interval_float));
/* UncMHz */
if (DO_BIC(BIC_UNCORE_MHZ))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->uncore_mhz);
@@ -2121,12 +2478,22 @@ int delta_package(struct pkg_data *new, struct pkg_data *old)
old->gfx_mhz = new->gfx_mhz;
old->gfx_act_mhz = new->gfx_act_mhz;
- old->energy_pkg = new->energy_pkg - old->energy_pkg;
- old->energy_cores = new->energy_cores - old->energy_cores;
- old->energy_gfx = new->energy_gfx - old->energy_gfx;
- old->energy_dram = new->energy_dram - old->energy_dram;
- old->rapl_pkg_perf_status = new->rapl_pkg_perf_status - old->rapl_pkg_perf_status;
- old->rapl_dram_perf_status = new->rapl_dram_perf_status - old->rapl_dram_perf_status;
+ /* flag an error when mc6 counter resets/wraps */
+ if (old->sam_mc6_ms > new->sam_mc6_ms)
+ old->sam_mc6_ms = -1;
+ else
+ old->sam_mc6_ms = new->sam_mc6_ms - old->sam_mc6_ms;
+
+ old->sam_mhz = new->sam_mhz;
+ old->sam_act_mhz = new->sam_act_mhz;
+
+ old->energy_pkg.raw_value = new->energy_pkg.raw_value - old->energy_pkg.raw_value;
+ old->energy_cores.raw_value = new->energy_cores.raw_value - old->energy_cores.raw_value;
+ old->energy_gfx.raw_value = new->energy_gfx.raw_value - old->energy_gfx.raw_value;
+ old->energy_dram.raw_value = new->energy_dram.raw_value - old->energy_dram.raw_value;
+ old->rapl_pkg_perf_status.raw_value = new->rapl_pkg_perf_status.raw_value - old->rapl_pkg_perf_status.raw_value;
+ old->rapl_dram_perf_status.raw_value =
+ new->rapl_dram_perf_status.raw_value - old->rapl_dram_perf_status.raw_value;
for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW)
@@ -2150,7 +2517,7 @@ void delta_core(struct core_data *new, struct core_data *old)
old->core_throt_cnt = new->core_throt_cnt;
old->mc6_us = new->mc6_us - old->mc6_us;
- DELTA_WRAP32(new->core_energy, old->core_energy);
+ DELTA_WRAP32(new->core_energy.raw_value, old->core_energy.raw_value);
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW)
@@ -2277,6 +2644,13 @@ int delta_cpu(struct thread_data *t, struct core_data *c,
return retval;
}
+void rapl_counter_clear(struct rapl_counter *c)
+{
+ c->raw_value = 0;
+ c->scale = 0.0;
+ c->unit = RAPL_UNIT_INVALID;
+}
+
void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
int i;
@@ -2304,7 +2678,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
c->c7 = 0;
c->mc6_us = 0;
c->core_temp_c = 0;
- c->core_energy = 0;
+ rapl_counter_clear(&c->core_energy);
c->core_throt_cnt = 0;
p->pkg_wtd_core_c0 = 0;
@@ -2325,18 +2699,21 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
p->cpu_lpi = 0;
p->sys_lpi = 0;
- p->energy_pkg = 0;
- p->energy_dram = 0;
- p->energy_cores = 0;
- p->energy_gfx = 0;
- p->rapl_pkg_perf_status = 0;
- p->rapl_dram_perf_status = 0;
+ rapl_counter_clear(&p->energy_pkg);
+ rapl_counter_clear(&p->energy_dram);
+ rapl_counter_clear(&p->energy_cores);
+ rapl_counter_clear(&p->energy_gfx);
+ rapl_counter_clear(&p->rapl_pkg_perf_status);
+ rapl_counter_clear(&p->rapl_dram_perf_status);
p->pkg_temp_c = 0;
p->gfx_rc6_ms = 0;
p->uncore_mhz = 0;
p->gfx_mhz = 0;
p->gfx_act_mhz = 0;
+ p->sam_mc6_ms = 0;
+ p->sam_mhz = 0;
+ p->sam_act_mhz = 0;
for (i = 0, mp = sys.tp; mp; i++, mp = mp->next)
t->counter[i] = 0;
@@ -2347,6 +2724,20 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
p->counter[i] = 0;
}
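+/* Add src into dst, adopting src's unit and scale on first use; units must match */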
+void rapl_counter_accumulate(struct rapl_counter *dst, const struct rapl_counter *src)
+{
+ /* Copy unit and scale from src if dst is not initialized */
+ if (dst->unit == RAPL_UNIT_INVALID) {
+ dst->unit = src->unit;
+ dst->scale = src->scale;
+ }
+
+ assert(dst->unit == src->unit);
+ assert(dst->scale == src->scale);
+
+ dst->raw_value += src->raw_value;
+}
+
int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
int i;
@@ -2393,7 +2784,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
average.cores.core_throt_cnt = MAX(average.cores.core_throt_cnt, c->core_throt_cnt);
- average.cores.core_energy += c->core_energy;
+ rapl_counter_accumulate(&average.cores.core_energy, &c->core_energy);
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW)
@@ -2428,25 +2819,29 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
average.packages.cpu_lpi = p->cpu_lpi;
average.packages.sys_lpi = p->sys_lpi;
- average.packages.energy_pkg += p->energy_pkg;
- average.packages.energy_dram += p->energy_dram;
- average.packages.energy_cores += p->energy_cores;
- average.packages.energy_gfx += p->energy_gfx;
+ rapl_counter_accumulate(&average.packages.energy_pkg, &p->energy_pkg);
+ rapl_counter_accumulate(&average.packages.energy_dram, &p->energy_dram);
+ rapl_counter_accumulate(&average.packages.energy_cores, &p->energy_cores);
+ rapl_counter_accumulate(&average.packages.energy_gfx, &p->energy_gfx);
average.packages.gfx_rc6_ms = p->gfx_rc6_ms;
average.packages.uncore_mhz = p->uncore_mhz;
average.packages.gfx_mhz = p->gfx_mhz;
average.packages.gfx_act_mhz = p->gfx_act_mhz;
+ average.packages.sam_mc6_ms = p->sam_mc6_ms;
+ average.packages.sam_mhz = p->sam_mhz;
+ average.packages.sam_act_mhz = p->sam_act_mhz;
average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
- average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
- average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
+ rapl_counter_accumulate(&average.packages.rapl_pkg_perf_status, &p->rapl_pkg_perf_status);
+ rapl_counter_accumulate(&average.packages.rapl_dram_perf_status, &p->rapl_dram_perf_status);
for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
- if (mp->format == FORMAT_RAW)
- continue;
- average.packages.counter[i] += p->counter[i];
+ if ((mp->format == FORMAT_RAW) && (topo.num_packages == 0))
+ average.packages.counter[i] = p->counter[i];
+ else
+ average.packages.counter[i] += p->counter[i];
}
return 0;
}
@@ -2578,6 +2973,7 @@ unsigned long long snapshot_sysfs_counter(char *path)
int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
{
if (mp->msr_num != 0) {
+ assert(!no_msr);
if (get_msr(cpu, mp->msr_num, counterp))
return -1;
} else {
@@ -2599,7 +2995,7 @@ unsigned long long get_uncore_mhz(int package, int die)
{
char path[128];
- sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/current_freq_khz", package,
+ sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d/current_freq_khz", package,
die);
return (snapshot_sysfs_counter(path) / 1000);
@@ -2627,6 +3023,9 @@ int get_epb(int cpu)
return epb;
msr_fallback:
+ if (no_msr)
+ return -1;
+
get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr);
return msr & 0xf;
@@ -2700,6 +3099,351 @@ int get_core_throt_cnt(int cpu, unsigned long long *cnt)
return 0;
}
+struct amperf_group_fd {
+ int aperf; /* Also the group descriptor */
+ int mperf;
+};
+
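+/* Read a small sysfs file and parse one value with the given sscanf format; 0 on success */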
+static int read_perf_counter_info(const char *const path, const char *const parse_format, void *value_ptr)
+{
+ int fdmt;
+ int bytes_read;
+ char buf[64];
+ int ret = -1;
+
+ fdmt = open(path, O_RDONLY, 0);
+ if (fdmt == -1) {
+ if (debug)
+ fprintf(stderr, "Failed to parse perf counter info %s\n", path);
+ ret = -1;
+ goto cleanup_and_exit;
+ }
+
+ bytes_read = read(fdmt, buf, sizeof(buf) - 1);
+ if (bytes_read <= 0 || bytes_read >= (int)sizeof(buf)) {
+ if (debug)
+ fprintf(stderr, "Failed to parse perf counter info %s\n", path);
+ ret = -1;
+ goto cleanup_and_exit;
+ }
+
+ buf[bytes_read] = '\0';
+
+ if (sscanf(buf, parse_format, value_ptr) != 1) {
+ if (debug)
+ fprintf(stderr, "Failed to parse perf counter info %s\n", path);
+ ret = -1;
+ goto cleanup_and_exit;
+ }
+
+ ret = 0;
+
+cleanup_and_exit:
+ close(fdmt);
+ return ret;
+}
+
+static unsigned int read_perf_counter_info_n(const char *const path, const char *const parse_format)
+{
+ unsigned int v;
+ int status;
+
+ status = read_perf_counter_info(path, parse_format, &v);
+ if (status)
+ v = -1;
+
+ return v;
+}
+
+static unsigned int read_msr_type(void)
+{
+ const char *const path = "/sys/bus/event_source/devices/msr/type";
+ const char *const format = "%u";
+
+ return read_perf_counter_info_n(path, format);
+}
+
+static unsigned int read_aperf_config(void)
+{
+ const char *const path = "/sys/bus/event_source/devices/msr/events/aperf";
+ const char *const format = "event=%x";
+
+ return read_perf_counter_info_n(path, format);
+}
+
+static unsigned int read_mperf_config(void)
+{
+ const char *const path = "/sys/bus/event_source/devices/msr/events/mperf";
+ const char *const format = "event=%x";
+
+ return read_perf_counter_info_n(path, format);
+}
+
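+/* Look up the dynamic PMU type id for the given perf event source subsystem */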
+static unsigned int read_perf_type(const char *subsys)
+{
+ const char *const path_format = "/sys/bus/event_source/devices/%s/type";
+ const char *const format = "%u";
+ char path[128];
+
+ snprintf(path, sizeof(path), path_format, subsys);
+
+ return read_perf_counter_info_n(path, format);
+}
+
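+/* Parse the "event=..." config value of a RAPL perf event from sysfs */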
+static unsigned int read_rapl_config(const char *subsys, const char *event_name)
+{
+ const char *const path_format = "/sys/bus/event_source/devices/%s/events/%s";
+ const char *const format = "event=%x";
+ char path[128];
+
+ snprintf(path, sizeof(path), path_format, subsys, event_name);
+
+ return read_perf_counter_info_n(path, format);
+}
+
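+/* Map a RAPL perf event's sysfs unit string to an enum rapl_unit */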
+static unsigned int read_perf_rapl_unit(const char *subsys, const char *event_name)
+{
+ const char *const path_format = "/sys/bus/event_source/devices/%s/events/%s.unit";
+ const char *const format = "%s";
+ char path[128];
+ char unit_buffer[16];
+
+ snprintf(path, sizeof(path), path_format, subsys, event_name);
+
+ if (read_perf_counter_info(path, format, unit_buffer))
+ return RAPL_UNIT_INVALID;
+
+ if (strcmp("Joules", unit_buffer) == 0)
+ return RAPL_UNIT_JOULES;
+
+ return RAPL_UNIT_INVALID;
+}
+
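+/* Read a RAPL perf event's sysfs scale factor; returns 0.0 on failure */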
+static double read_perf_rapl_scale(const char *subsys, const char *event_name)
+{
+ const char *const path_format = "/sys/bus/event_source/devices/%s/events/%s.scale";
+ const char *const format = "%lf";
+ char path[128];
+ double scale;
+
+ snprintf(path, sizeof(path), path_format, subsys, event_name);
+
+ if (read_perf_counter_info(path, format, &scale))
+ return 0.0;
+
+ return scale;
+}
+
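+/* Open APERF and MPERF in one perf group, so a single read() snapshots both */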
+static struct amperf_group_fd open_amperf_fd(int cpu)
+{
+ const unsigned int msr_type = read_msr_type();
+ const unsigned int aperf_config = read_aperf_config();
+ const unsigned int mperf_config = read_mperf_config();
+ struct amperf_group_fd fds = {.aperf = -1, .mperf = -1 };
+
+ fds.aperf = open_perf_counter(cpu, msr_type, aperf_config, -1, PERF_FORMAT_GROUP);
+ fds.mperf = open_perf_counter(cpu, msr_type, mperf_config, fds.aperf, PERF_FORMAT_GROUP);
+
+ return fds;
+}
+
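+/* Return the per-cpu APERF/MPERF group fd, opening it on first use */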
+static int get_amperf_fd(int cpu)
+{
+ assert(fd_amperf_percpu);
+
+ if (fd_amperf_percpu[cpu].aperf)
+ return fd_amperf_percpu[cpu].aperf;
+
+ fd_amperf_percpu[cpu] = open_amperf_fd(cpu);
+
+ return fd_amperf_percpu[cpu].aperf;
+}
+
+/* Read APERF, MPERF and TSC using the perf API. */
+static int read_aperf_mperf_tsc_perf(struct thread_data *t, int cpu)
+{
+ union {
+ struct {
+ unsigned long nr_entries;
+ unsigned long aperf;
+ unsigned long mperf;
+ };
+
+ unsigned long as_array[3];
+ } cnt;
+
+ const int fd_amperf = get_amperf_fd(cpu);
+
+ /*
+ * Read the TSC with rdtsc, because we want the absolute value and not
+ * the offset from the start of the counter.
+ */
+ t->tsc = rdtsc();
+
+ const int n = read(fd_amperf, &cnt.as_array[0], sizeof(cnt.as_array));
+
+ if (n != sizeof(cnt.as_array))
+ return -2;
+
+ t->aperf = cnt.aperf * aperf_mperf_multiplier;
+ t->mperf = cnt.mperf * aperf_mperf_multiplier;
+
+ return 0;
+}
+
+/* Read APERF, MPERF and TSC using the MSR driver and rdtsc instruction. */
+static int read_aperf_mperf_tsc_msr(struct thread_data *t, int cpu)
+{
+ unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
+ int aperf_mperf_retry_count = 0;
+
+ /*
+ * The TSC, APERF and MPERF must be read together for
+ * APERF/MPERF and MPERF/TSC to give accurate results.
+ *
+ * Unfortunately, APERF and MPERF are read by
+ * individual system call, so delays may occur
+ * between them. If the time to read them
+ * varies by a large amount, we re-read them.
+ */
+
+ /*
+ * This initial dummy APERF read has been seen to
+ * reduce jitter in the subsequent reads.
+ */
+
+ if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
+ return -3;
+
+retry:
+ t->tsc = rdtsc(); /* re-read close to APERF */
+
+ tsc_before = t->tsc;
+
+ if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
+ return -3;
+
+ tsc_between = rdtsc();
+
+ if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
+ return -4;
+
+ tsc_after = rdtsc();
+
+ aperf_time = tsc_between - tsc_before;
+ mperf_time = tsc_after - tsc_between;
+
+ /*
+ * If the system call latency to read APERF and MPERF
+ * differ by more than 2x, then try again.
+ */
+ if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) {
+ aperf_mperf_retry_count++;
+ if (aperf_mperf_retry_count < 5)
+ goto retry;
+ else
+ warnx("cpu%d jitter %lld %lld", cpu, aperf_time, mperf_time);
+ }
+ aperf_mperf_retry_count = 0;
+
+ t->aperf = t->aperf * aperf_mperf_multiplier;
+ t->mperf = t->mperf * aperf_mperf_multiplier;
+
+ return 0;
+}
+
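+/* Count the RAPL counters in this domain that are read via perf */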
+size_t rapl_counter_info_count_perf(const struct rapl_counter_info_t *rci)
+{
+ size_t ret = 0;
+
+ for (int i = 0; i < NUM_RAPL_COUNTERS; ++i)
+ if (rci->source[i] == RAPL_SOURCE_PERF)
+ ++ret;
+
+ return ret;
+}
+
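+/* Publish one raw counter value together with its unit and scale */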
+void write_rapl_counter(struct rapl_counter *rc, struct rapl_counter_info_t *rci, unsigned int idx)
+{
+ rc->raw_value = rci->data[idx];
+ rc->unit = rci->unit[idx];
+ rc->scale = rci->scale[idx];
+}
+
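+/* Read all RAPL counters of one domain: perf counters in a single bulk read, the rest via MSR */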
+int get_rapl_counters(int cpu, int domain, struct core_data *c, struct pkg_data *p)
+{
+ unsigned long long perf_data[NUM_RAPL_COUNTERS + 1];
+ struct rapl_counter_info_t *rci = &rapl_counter_info_perdomain[domain];
+
+ if (debug)
+ fprintf(stderr, "%s: cpu%d domain%d\n", __func__, cpu, domain);
+
+ assert(rapl_counter_info_perdomain);
+
+ /*
+ * If we have any perf counters to read, read them all now, in bulk
+ */
+ if (rci->fd_perf != -1) {
+ size_t num_perf_counters = rapl_counter_info_count_perf(rci);
+ const ssize_t expected_read_size = (num_perf_counters + 1) * sizeof(unsigned long long);
+ const ssize_t actual_read_size = read(rci->fd_perf, &perf_data[0], sizeof(perf_data));
+
+ if (actual_read_size != expected_read_size)
+ err(-1, "%s: failed to read perf_data (%zu %zu)", __func__, expected_read_size,
+ actual_read_size);
+ }
+
+ for (unsigned int i = 0, pi = 1; i < NUM_RAPL_COUNTERS; ++i) {
+ switch (rci->source[i]) {
+ case RAPL_SOURCE_NONE:
+ break;
+
+ case RAPL_SOURCE_PERF:
+ assert(pi < ARRAY_SIZE(perf_data));
+ assert(rci->fd_perf != -1);
+
+ if (debug)
+ fprintf(stderr, "Reading rapl counter via perf at %u (%llu %e %lf)\n",
+ i, perf_data[pi], rci->scale[i], perf_data[pi] * rci->scale[i]);
+
+ rci->data[i] = perf_data[pi];
+
+ ++pi;
+ break;
+
+ case RAPL_SOURCE_MSR:
+ if (debug)
+ fprintf(stderr, "Reading rapl counter via msr at %u\n", i);
+
+ assert(!no_msr);
+ if (rci->flags[i] & RAPL_COUNTER_FLAG_USE_MSR_SUM) {
+ if (get_msr_sum(cpu, rci->msr[i], &rci->data[i]))
+ return -13 - i;
+ } else {
+ if (get_msr(cpu, rci->msr[i], &rci->data[i]))
+ return -13 - i;
+ }
+
+ rci->data[i] &= rci->msr_mask[i];
+ if (rci->msr_shift[i] >= 0)
+ rci->data[i] >>= abs(rci->msr_shift[i]);
+ else
+ rci->data[i] <<= abs(rci->msr_shift[i]);
+
+ break;
+ }
+ }
+
+ _Static_assert(NUM_RAPL_COUNTERS == 7, "update the write_rapl_counter() calls below");
+ write_rapl_counter(&p->energy_pkg, rci, RAPL_RCI_INDEX_ENERGY_PKG);
+ write_rapl_counter(&p->energy_cores, rci, RAPL_RCI_INDEX_ENERGY_CORES);
+ write_rapl_counter(&p->energy_dram, rci, RAPL_RCI_INDEX_DRAM);
+ write_rapl_counter(&p->energy_gfx, rci, RAPL_RCI_INDEX_GFX);
+ write_rapl_counter(&p->rapl_pkg_perf_status, rci, RAPL_RCI_INDEX_PKG_PERF_STATUS);
+ write_rapl_counter(&p->rapl_dram_perf_status, rci, RAPL_RCI_INDEX_DRAM_PERF_STATUS);
+ write_rapl_counter(&c->core_energy, rci, RAPL_RCI_INDEX_CORE_ENERGY);
+
+ return 0;
+}
+
/*
* get_counters(...)
* migrate to cpu
@@ -2709,12 +3453,12 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
int cpu = t->cpu_id;
unsigned long long msr;
- int aperf_mperf_retry_count = 0;
struct msr_counter *mp;
int i;
+ int status;
if (cpu_migrate(cpu)) {
- fprintf(outf, "get_counters: Could not migrate to CPU %d\n", cpu);
+ fprintf(outf, "%s: Could not migrate to CPU %d\n", __func__, cpu);
return -1;
}
@@ -2722,63 +3466,26 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (first_counter_read)
get_apic_id(t);
-retry:
+
t->tsc = rdtsc(); /* we are running on local CPU of interest */
if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || DO_BIC(BIC_IPC)
|| soft_c1_residency_display(BIC_Avg_MHz)) {
- unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
-
- /*
- * The TSC, APERF and MPERF must be read together for
- * APERF/MPERF and MPERF/TSC to give accurate results.
- *
- * Unfortunately, APERF and MPERF are read by
- * individual system call, so delays may occur
- * between them. If the time to read them
- * varies by a large amount, we re-read them.
- */
-
- /*
- * This initial dummy APERF read has been seen to
- * reduce jitter in the subsequent reads.
- */
-
- if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
- return -3;
-
- t->tsc = rdtsc(); /* re-read close to APERF */
-
- tsc_before = t->tsc;
+ int status = -1;
- if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
- return -3;
+ assert(!no_perf || !no_msr);
- tsc_between = rdtsc();
-
- if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
- return -4;
-
- tsc_after = rdtsc();
-
- aperf_time = tsc_between - tsc_before;
- mperf_time = tsc_after - tsc_between;
-
- /*
- * If the system call latency to read APERF and MPERF
- * differ by more than 2x, then try again.
- */
- if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) {
- aperf_mperf_retry_count++;
- if (aperf_mperf_retry_count < 5)
- goto retry;
- else
- warnx("cpu%d jitter %lld %lld", cpu, aperf_time, mperf_time);
+ switch (amperf_source) {
+ case AMPERF_SOURCE_PERF:
+ status = read_aperf_mperf_tsc_perf(t, cpu);
+ break;
+ case AMPERF_SOURCE_MSR:
+ status = read_aperf_mperf_tsc_msr(t, cpu);
+ break;
}
- aperf_mperf_retry_count = 0;
- t->aperf = t->aperf * aperf_mperf_multiplier;
- t->mperf = t->mperf * aperf_mperf_multiplier;
+ if (status != 0)
+ return status;
}
if (DO_BIC(BIC_IPC))
@@ -2806,6 +3513,12 @@ retry:
if (!is_cpu_first_thread_in_core(t, c, p))
goto done;
+ if (platform->has_per_core_rapl) {
+ status = get_rapl_counters(cpu, c->core_id, c, p);
+ if (status != 0)
+ return status;
+ }
+
if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
return -6;
@@ -2846,12 +3559,6 @@ retry:
if (DO_BIC(BIC_CORE_THROT_CNT))
get_core_throt_cnt(cpu, &c->core_throt_cnt);
- if (platform->rapl_msrs & RAPL_AMD_F17H) {
- if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
- return -14;
- c->core_energy = msr & 0xFFFFFFFF;
- }
-
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (get_mp(cpu, mp, &c->counter[i]))
return -10;
@@ -2911,59 +3618,39 @@ retry:
if (DO_BIC(BIC_SYS_LPI))
p->sys_lpi = cpuidle_cur_sys_lpi_us;
- if (platform->rapl_msrs & RAPL_PKG) {
- if (get_msr_sum(cpu, MSR_PKG_ENERGY_STATUS, &msr))
- return -13;
- p->energy_pkg = msr;
- }
- if (platform->rapl_msrs & RAPL_CORE_ENERGY_STATUS) {
- if (get_msr_sum(cpu, MSR_PP0_ENERGY_STATUS, &msr))
- return -14;
- p->energy_cores = msr;
- }
- if (platform->rapl_msrs & RAPL_DRAM) {
- if (get_msr_sum(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
- return -15;
- p->energy_dram = msr;
- }
- if (platform->rapl_msrs & RAPL_GFX) {
- if (get_msr_sum(cpu, MSR_PP1_ENERGY_STATUS, &msr))
- return -16;
- p->energy_gfx = msr;
- }
- if (platform->rapl_msrs & RAPL_PKG_PERF_STATUS) {
- if (get_msr_sum(cpu, MSR_PKG_PERF_STATUS, &msr))
- return -16;
- p->rapl_pkg_perf_status = msr;
- }
- if (platform->rapl_msrs & RAPL_DRAM_PERF_STATUS) {
- if (get_msr_sum(cpu, MSR_DRAM_PERF_STATUS, &msr))
- return -16;
- p->rapl_dram_perf_status = msr;
- }
- if (platform->rapl_msrs & RAPL_AMD_F17H) {
- if (get_msr_sum(cpu, MSR_PKG_ENERGY_STAT, &msr))
- return -13;
- p->energy_pkg = msr;
+ if (!platform->has_per_core_rapl) {
+ status = get_rapl_counters(cpu, p->package_id, c, p);
+ if (status != 0)
+ return status;
}
+
if (DO_BIC(BIC_PkgTmp)) {
if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
return -17;
p->pkg_temp_c = tj_max - ((msr >> 16) & 0x7F);
}
- if (DO_BIC(BIC_GFX_rc6))
- p->gfx_rc6_ms = gfx_cur_rc6_ms;
-
/* n.b. assume die0 uncore frequency applies to whole package */
if (DO_BIC(BIC_UNCORE_MHZ))
p->uncore_mhz = get_uncore_mhz(p->package_id, 0);
+ if (DO_BIC(BIC_GFX_rc6))
+ p->gfx_rc6_ms = gfx_info[GFX_rc6].val_ull;
+
if (DO_BIC(BIC_GFXMHz))
- p->gfx_mhz = gfx_cur_mhz;
+ p->gfx_mhz = gfx_info[GFX_MHz].val;
if (DO_BIC(BIC_GFXACTMHz))
- p->gfx_act_mhz = gfx_act_mhz;
+ p->gfx_act_mhz = gfx_info[GFX_ACTMHz].val;
+
+ if (DO_BIC(BIC_SAM_mc6))
+ p->sam_mc6_ms = gfx_info[SAM_mc6].val_ull;
+
+ if (DO_BIC(BIC_SAMMHz))
+ p->sam_mhz = gfx_info[SAM_MHz].val;
+
+ if (DO_BIC(BIC_SAMACTMHz))
+ p->sam_act_mhz = gfx_info[SAM_ACTMHz].val;
for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
if (get_mp(cpu, mp, &p->counter[i]))
@@ -3053,7 +3740,7 @@ void probe_cst_limit(void)
unsigned long long msr;
int *pkg_cstate_limits;
- if (!platform->has_nhm_msrs)
+ if (!platform->has_nhm_msrs || no_msr)
return;
switch (platform->cst_limit) {
@@ -3097,7 +3784,7 @@ static void dump_platform_info(void)
unsigned long long msr;
unsigned int ratio;
- if (!platform->has_nhm_msrs)
+ if (!platform->has_nhm_msrs || no_msr)
return;
get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
@@ -3115,7 +3802,7 @@ static void dump_power_ctl(void)
{
unsigned long long msr;
- if (!platform->has_nhm_msrs)
+ if (!platform->has_nhm_msrs || no_msr)
return;
get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
@@ -3321,7 +4008,7 @@ static void dump_cst_cfg(void)
{
unsigned long long msr;
- if (!platform->has_nhm_msrs)
+ if (!platform->has_nhm_msrs || no_msr)
return;
get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr);
@@ -3393,7 +4080,7 @@ void print_irtl(void)
{
unsigned long long msr;
- if (!platform->has_irtl_msrs)
+ if (!platform->has_irtl_msrs || no_msr)
return;
if (platform->supported_cstates & PC3) {
@@ -3443,12 +4130,64 @@ void free_fd_percpu(void)
{
int i;
+ if (!fd_percpu)
+ return;
+
for (i = 0; i < topo.max_cpu_num + 1; ++i) {
if (fd_percpu[i] != 0)
close(fd_percpu[i]);
}
free(fd_percpu);
+ fd_percpu = NULL;
+}
+
+void free_fd_amperf_percpu(void)
+{
+ int i;
+
+ if (!fd_amperf_percpu)
+ return;
+
+ for (i = 0; i < topo.max_cpu_num + 1; ++i) {
+ if (fd_amperf_percpu[i].mperf != 0)
+ close(fd_amperf_percpu[i].mperf);
+
+ if (fd_amperf_percpu[i].aperf != 0)
+ close(fd_amperf_percpu[i].aperf);
+ }
+
+ free(fd_amperf_percpu);
+ fd_amperf_percpu = NULL;
+}
+
+void free_fd_instr_count_percpu(void)
+{
+ if (!fd_instr_count_percpu)
+ return;
+
+ for (int i = 0; i < topo.max_cpu_num + 1; ++i) {
+ if (fd_instr_count_percpu[i] != 0)
+ close(fd_instr_count_percpu[i]);
+ }
+
+ free(fd_instr_count_percpu);
+ fd_instr_count_percpu = NULL;
+}
+
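+/* Close the per-domain RAPL perf group fds and free the bookkeeping array */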
+void free_fd_rapl_percpu(void)
+{
+ if (!rapl_counter_info_perdomain)
+ return;
+
+ const int num_domains = platform->has_per_core_rapl ? topo.num_cores : topo.num_packages;
+
+ for (int domain_id = 0; domain_id < num_domains; ++domain_id) {
+ if (rapl_counter_info_perdomain[domain_id].fd_perf != -1)
+ close(rapl_counter_info_perdomain[domain_id].fd_perf);
+ }
+
+ free(rapl_counter_info_perdomain);
+ rapl_counter_info_perdomain = NULL;
}
void free_all_buffers(void)
@@ -3492,6 +4231,9 @@ void free_all_buffers(void)
outp = NULL;
free_fd_percpu();
+ free_fd_instr_count_percpu();
+ free_fd_amperf_percpu();
+ free_fd_rapl_percpu();
free(irq_column_2_cpu);
free(irqs_per_cpu);
@@ -3825,11 +4567,17 @@ static void update_effective_set(bool startup)
err(1, "%s: cpu str malformat %s\n", PATH_EFFECTIVE_CPUS, cpu_effective_str);
}
+void linux_perf_init(void);
+void rapl_perf_init(void);
+
void re_initialize(void)
{
free_all_buffers();
setup_all_buffers(false);
- fprintf(outf, "turbostat: re-initialized with num_cpus %d, allowed_cpus %d\n", topo.num_cpus, topo.allowed_cpus);
+ linux_perf_init();
+ rapl_perf_init();
+ fprintf(outf, "turbostat: re-initialized with num_cpus %d, allowed_cpus %d\n", topo.num_cpus,
+ topo.allowed_cpus);
}
void set_max_cpu_num(void)
@@ -3940,85 +4688,43 @@ int snapshot_proc_interrupts(void)
}
/*
- * snapshot_gfx_rc6_ms()
+ * snapshot_graphics()
*
- * record snapshot of
- * /sys/class/drm/card0/power/rc6_residency_ms
+ * record snapshot of specified graphics sysfs knob
*
* return 1 if config change requires a restart, else return 0
*/
-int snapshot_gfx_rc6_ms(void)
+int snapshot_graphics(int idx)
{
FILE *fp;
int retval;
- fp = fopen_or_die("/sys/class/drm/card0/power/rc6_residency_ms", "r");
-
- retval = fscanf(fp, "%lld", &gfx_cur_rc6_ms);
- if (retval != 1)
- err(1, "GFX rc6");
-
- fclose(fp);
-
- return 0;
-}
-
-/*
- * snapshot_gfx_mhz()
- *
- * fall back to /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz
- * when /sys/class/drm/card0/gt_cur_freq_mhz is not available.
- *
- * return 1 if config change requires a restart, else return 0
- */
-int snapshot_gfx_mhz(void)
-{
- static FILE *fp;
- int retval;
-
- if (fp == NULL) {
- fp = fopen("/sys/class/drm/card0/gt_cur_freq_mhz", "r");
- if (!fp)
- fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
- } else {
- rewind(fp);
- fflush(fp);
- }
-
- retval = fscanf(fp, "%d", &gfx_cur_mhz);
- if (retval != 1)
- err(1, "GFX MHz");
-
- return 0;
-}
-
-/*
- * snapshot_gfx_cur_mhz()
- *
- * fall back to /sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz
- * when /sys/class/drm/card0/gt_act_freq_mhz is not available.
- *
- * return 1 if config change requires a restart, else return 0
- */
-int snapshot_gfx_act_mhz(void)
-{
- static FILE *fp;
- int retval;
-
- if (fp == NULL) {
- fp = fopen("/sys/class/drm/card0/gt_act_freq_mhz", "r");
- if (!fp)
- fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", "r");
- } else {
- rewind(fp);
- fflush(fp);
+ switch (idx) {
+ case GFX_rc6:
+ case SAM_mc6:
+ fp = fopen_or_die(gfx_info[idx].path, "r");
+ retval = fscanf(fp, "%lld", &gfx_info[idx].val_ull);
+ if (retval != 1)
+ err(1, "rc6");
+ fclose(fp);
+ return 0;
+ case GFX_MHz:
+ case GFX_ACTMHz:
+ case SAM_MHz:
+ case SAM_ACTMHz:
+ if (gfx_info[idx].fp == NULL) {
+ gfx_info[idx].fp = fopen_or_die(gfx_info[idx].path, "r");
+ } else {
+ rewind(gfx_info[idx].fp);
+ fflush(gfx_info[idx].fp);
+ }
+ retval = fscanf(gfx_info[idx].fp, "%d", &gfx_info[idx].val);
+ if (retval != 1)
+ err(1, "MHz");
+ return 0;
+ default:
+ return -EINVAL;
}
-
- retval = fscanf(fp, "%d", &gfx_act_mhz);
- if (retval != 1)
- err(1, "GFX ACT MHz");
-
- return 0;
}
/*
@@ -4083,13 +4789,22 @@ int snapshot_proc_sysfs_files(void)
return 1;
if (DO_BIC(BIC_GFX_rc6))
- snapshot_gfx_rc6_ms();
+ snapshot_graphics(GFX_rc6);
if (DO_BIC(BIC_GFXMHz))
- snapshot_gfx_mhz();
+ snapshot_graphics(GFX_MHz);
if (DO_BIC(BIC_GFXACTMHz))
- snapshot_gfx_act_mhz();
+ snapshot_graphics(GFX_ACTMHz);
+
+ if (DO_BIC(BIC_SAM_mc6))
+ snapshot_graphics(SAM_mc6);
+
+ if (DO_BIC(BIC_SAMMHz))
+ snapshot_graphics(SAM_MHz);
+
+ if (DO_BIC(BIC_SAMACTMHz))
+ snapshot_graphics(SAM_ACTMHz);
if (DO_BIC(BIC_CPU_LPI))
snapshot_cpu_lpi_us();
@@ -4173,6 +4888,8 @@ int get_msr_sum(int cpu, off_t offset, unsigned long long *msr)
int ret, idx;
unsigned long long msr_cur, msr_last;
+ assert(!no_msr);
+
if (!per_cpu_msr_sum)
return 1;
@@ -4201,6 +4918,8 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
UNUSED(c);
UNUSED(p);
+ assert(!no_msr);
+
for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
unsigned long long msr_cur, msr_last;
off_t offset;
@@ -4280,7 +4999,8 @@ release_msr:
/*
* set_my_sched_priority(pri)
- * return previous
+ * return previous priority on success
+ * return value < -20 on failure
*/
int set_my_sched_priority(int priority)
{
@@ -4290,16 +5010,16 @@ int set_my_sched_priority(int priority)
errno = 0;
original_priority = getpriority(PRIO_PROCESS, 0);
if (errno && (original_priority == -1))
- err(errno, "getpriority");
+ return -21;
retval = setpriority(PRIO_PROCESS, 0, priority);
if (retval)
- errx(retval, "capget(CAP_SYS_NICE) failed,try \"# setcap cap_sys_nice=ep %s\"", progname);
+ return -21;
errno = 0;
retval = getpriority(PRIO_PROCESS, 0);
if (retval != priority)
- err(retval, "getpriority(%d) != setpriority(%d)", retval, priority);
+ return -21;
return original_priority;
}
@@ -4314,6 +5034,9 @@ void turbostat_loop()
/*
* elevate own priority for interval mode
+ *
+ * Ignore failures: we probably lack permission to raise the
+ * priority, and that is not fatal.
*/
set_my_sched_priority(-20);
@@ -4399,10 +5122,13 @@ void check_dev_msr()
struct stat sb;
char pathname[32];
+ if (no_msr)
+ return;
+
sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
if (stat(pathname, &sb))
if (system("/sbin/modprobe msr > /dev/null 2>&1"))
- err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
+ no_msr = 1;
}
/*
@@ -4414,47 +5140,51 @@ int check_for_cap_sys_rawio(void)
{
cap_t caps;
cap_flag_value_t cap_flag_value;
+ int ret = 0;
caps = cap_get_proc();
if (caps == NULL)
- err(-6, "cap_get_proc\n");
+ return 1;
- if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value))
- err(-6, "cap_get\n");
+ if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value)) {
+ ret = 1;
+ goto free_and_exit;
+ }
if (cap_flag_value != CAP_SET) {
- warnx("capget(CAP_SYS_RAWIO) failed," " try \"# setcap cap_sys_rawio=ep %s\"", progname);
- return 1;
+ ret = 1;
+ goto free_and_exit;
}
+free_and_exit:
if (cap_free(caps) == -1)
err(-6, "cap_free\n");
- return 0;
+ return ret;
}
-void check_permissions(void)
+void check_msr_permission(void)
{
- int do_exit = 0;
+ int failed = 0;
char pathname[32];
+ if (no_msr)
+ return;
+
/* check for CAP_SYS_RAWIO */
- do_exit += check_for_cap_sys_rawio();
+ failed += check_for_cap_sys_rawio();
/* test file permissions */
sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
if (euidaccess(pathname, R_OK)) {
- do_exit++;
- warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
+ failed++;
}
- /* if all else fails, thell them to be root */
- if (do_exit)
- if (getuid() != 0)
- warnx("... or simply run as root");
-
- if (do_exit)
- exit(-6);
+ if (failed) {
+ warnx("Failed to access %s. Some of the counters may not be available\n"
+ "\tRun as root to enable them or use %s to disable the access explicitly", pathname, "--no-msr");
+ no_msr = 1;
+ }
}
void probe_bclk(void)
@@ -4462,7 +5192,7 @@ void probe_bclk(void)
unsigned long long msr;
unsigned int base_ratio;
- if (!platform->has_nhm_msrs)
+ if (!platform->has_nhm_msrs || no_msr)
return;
if (platform->bclk_freq == BCLK_100MHZ)
@@ -4502,7 +5232,7 @@ static void dump_turbo_ratio_info(void)
if (!has_turbo)
return;
- if (!platform->has_nhm_msrs)
+ if (!platform->has_nhm_msrs || no_msr)
return;
if (platform->trl_msrs & TRL_LIMIT2)
@@ -4567,20 +5297,15 @@ static void dump_sysfs_file(char *path)
static void probe_intel_uncore_frequency(void)
{
int i, j;
- char path[128];
+ char path[256];
if (!genuine_intel)
return;
- if (access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00", R_OK))
- return;
-
- /* Cluster level sysfs not supported yet. */
- if (!access("/sys/devices/system/cpu/intel_uncore_frequency/uncore00", R_OK))
- return;
+ if (access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/current_freq_khz", R_OK))
+ goto probe_cluster;
- if (!access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/current_freq_khz", R_OK))
- BIC_PRESENT(BIC_UNCORE_MHZ);
+ BIC_PRESENT(BIC_UNCORE_MHZ);
if (quiet)
return;
@@ -4588,40 +5313,178 @@ static void probe_intel_uncore_frequency(void)
for (i = 0; i < topo.num_packages; ++i) {
for (j = 0; j < topo.num_die; ++j) {
int k, l;
+ char path_base[128];
+
+ sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d", i,
+ j);
- sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/min_freq_khz",
- i, j);
+ sprintf(path, "%s/min_freq_khz", path_base);
k = read_sysfs_int(path);
- sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/max_freq_khz",
- i, j);
+ sprintf(path, "%s/max_freq_khz", path_base);
l = read_sysfs_int(path);
- fprintf(outf, "Uncore Frequency pkg%d die%d: %d - %d MHz ", i, j, k / 1000, l / 1000);
+ fprintf(outf, "Uncore Frequency package%d die%d: %d - %d MHz ", i, j, k / 1000, l / 1000);
- sprintf(path,
- "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/initial_min_freq_khz",
- i, j);
+ sprintf(path, "%s/initial_min_freq_khz", path_base);
k = read_sysfs_int(path);
- sprintf(path,
- "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/initial_max_freq_khz",
- i, j);
+ sprintf(path, "%s/initial_max_freq_khz", path_base);
l = read_sysfs_int(path);
- fprintf(outf, "(%d - %d MHz)\n", k / 1000, l / 1000);
+ fprintf(outf, "(%d - %d MHz)", k / 1000, l / 1000);
+
+ sprintf(path, "%s/current_freq_khz", path_base);
+ k = read_sysfs_int(path);
+ fprintf(outf, " %d MHz\n", k / 1000);
}
}
+ return;
+
+probe_cluster:
+ if (access("/sys/devices/system/cpu/intel_uncore_frequency/uncore00/current_freq_khz", R_OK))
+ return;
+
+ if (quiet)
+ return;
+
+ for (i = 0;; ++i) {
+ int k, l;
+ char path_base[128];
+ int package_id, domain_id, cluster_id;
+
+ sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/uncore%02d", i);
+
+ if (access(path_base, R_OK))
+ break;
+
+ sprintf(path, "%s/package_id", path_base);
+ package_id = read_sysfs_int(path);
+
+ sprintf(path, "%s/domain_id", path_base);
+ domain_id = read_sysfs_int(path);
+
+ sprintf(path, "%s/fabric_cluster_id", path_base);
+ cluster_id = read_sysfs_int(path);
+
+ sprintf(path, "%s/min_freq_khz", path_base);
+ k = read_sysfs_int(path);
+ sprintf(path, "%s/max_freq_khz", path_base);
+ l = read_sysfs_int(path);
+ fprintf(outf, "Uncore Frequency package%d domain%d cluster%d: %d - %d MHz ", package_id, domain_id,
+ cluster_id, k / 1000, l / 1000);
+
+ sprintf(path, "%s/initial_min_freq_khz", path_base);
+ k = read_sysfs_int(path);
+ sprintf(path, "%s/initial_max_freq_khz", path_base);
+ l = read_sysfs_int(path);
+ fprintf(outf, "(%d - %d MHz)", k / 1000, l / 1000);
+
+ sprintf(path, "%s/current_freq_khz", path_base);
+ k = read_sysfs_int(path);
+ fprintf(outf, " %d MHz\n", k / 1000);
+ }
}
static void probe_graphics(void)
{
+ /* Xe graphics sysfs knobs */
+ if (!access("/sys/class/drm/card0/device/tile0/gt0/gtidle/idle_residency_ms", R_OK)) {
+ FILE *fp;
+ char buf[8];
+ bool gt0_is_gt;
+ int idx;
+
+ fp = fopen("/sys/class/drm/card0/device/tile0/gt0/gtidle/name", "r");
+ if (!fp)
+ goto next;
+
+ if (!fread(buf, sizeof(char), 7, fp)) {
+ fclose(fp);
+ goto next;
+ }
+ fclose(fp);
+
+ if (!strncmp(buf, "gt0-rc", strlen("gt0-rc")))
+ gt0_is_gt = true;
+ else if (!strncmp(buf, "gt0-mc", strlen("gt0-mc")))
+ gt0_is_gt = false;
+ else
+ goto next;
+
+ idx = gt0_is_gt ? GFX_rc6 : SAM_mc6;
+ gfx_info[idx].path = "/sys/class/drm/card0/device/tile0/gt0/gtidle/idle_residency_ms";
+
+ idx = gt0_is_gt ? GFX_MHz : SAM_MHz;
+ if (!access("/sys/class/drm/card0/device/tile0/gt0/freq0/cur_freq", R_OK))
+ gfx_info[idx].path = "/sys/class/drm/card0/device/tile0/gt0/freq0/cur_freq";
+
+ idx = gt0_is_gt ? GFX_ACTMHz : SAM_ACTMHz;
+ if (!access("/sys/class/drm/card0/device/tile0/gt0/freq0/act_freq", R_OK))
+ gfx_info[idx].path = "/sys/class/drm/card0/device/tile0/gt0/freq0/act_freq";
+
+ idx = gt0_is_gt ? SAM_mc6 : GFX_rc6;
+ if (!access("/sys/class/drm/card0/device/tile0/gt1/gtidle/idle_residency_ms", R_OK))
+ gfx_info[idx].path = "/sys/class/drm/card0/device/tile0/gt1/gtidle/idle_residency_ms";
+
+ idx = gt0_is_gt ? SAM_MHz : GFX_MHz;
+ if (!access("/sys/class/drm/card0/device/tile0/gt1/freq0/cur_freq", R_OK))
+ gfx_info[idx].path = "/sys/class/drm/card0/device/tile0/gt1/freq0/cur_freq";
+
+ idx = gt0_is_gt ? SAM_ACTMHz : GFX_ACTMHz;
+ if (!access("/sys/class/drm/card0/device/tile0/gt1/freq0/act_freq", R_OK))
+ gfx_info[idx].path = "/sys/class/drm/card0/device/tile0/gt1/freq0/act_freq";
+
+ goto end;
+ }
+
+next:
+ /* New i915 graphics sysfs knobs */
+ if (!access("/sys/class/drm/card0/gt/gt0/rc6_residency_ms", R_OK)) {
+ gfx_info[GFX_rc6].path = "/sys/class/drm/card0/gt/gt0/rc6_residency_ms";
+
+ if (!access("/sys/class/drm/card0/gt/gt0/rps_cur_freq_mhz", R_OK))
+ gfx_info[GFX_MHz].path = "/sys/class/drm/card0/gt/gt0/rps_cur_freq_mhz";
+
+ if (!access("/sys/class/drm/card0/gt/gt0/rps_act_freq_mhz", R_OK))
+ gfx_info[GFX_ACTMHz].path = "/sys/class/drm/card0/gt/gt0/rps_act_freq_mhz";
+
+ if (!access("/sys/class/drm/card0/gt/gt1/rc6_residency_ms", R_OK))
+ gfx_info[SAM_mc6].path = "/sys/class/drm/card0/gt/gt1/rc6_residency_ms";
+
+ if (!access("/sys/class/drm/card0/gt/gt1/rps_cur_freq_mhz", R_OK))
+ gfx_info[SAM_MHz].path = "/sys/class/drm/card0/gt/gt1/rps_cur_freq_mhz";
+
+ if (!access("/sys/class/drm/card0/gt/gt1/rps_act_freq_mhz", R_OK))
+ gfx_info[SAM_ACTMHz].path = "/sys/class/drm/card0/gt/gt1/rps_act_freq_mhz";
+
+ goto end;
+ }
+
+ /* Fall back to traditional i915 graphics sysfs knobs */
if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK))
- BIC_PRESENT(BIC_GFX_rc6);
+ gfx_info[GFX_rc6].path = "/sys/class/drm/card0/power/rc6_residency_ms";
+
+ if (!access("/sys/class/drm/card0/gt_cur_freq_mhz", R_OK))
+ gfx_info[GFX_MHz].path = "/sys/class/drm/card0/gt_cur_freq_mhz";
+ else if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK))
+ gfx_info[GFX_MHz].path = "/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz";
- if (!access("/sys/class/drm/card0/gt_cur_freq_mhz", R_OK) ||
- !access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK))
- BIC_PRESENT(BIC_GFXMHz);
- if (!access("/sys/class/drm/card0/gt_act_freq_mhz", R_OK) ||
- !access("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", R_OK))
+ if (!access("/sys/class/drm/card0/gt_act_freq_mhz", R_OK))
+ gfx_info[GFX_ACTMHz].path = "/sys/class/drm/card0/gt_act_freq_mhz";
+ else if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", R_OK))
+ gfx_info[GFX_ACTMHz].path = "/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz";
+
+end:
+ if (gfx_info[GFX_rc6].path)
+ BIC_PRESENT(BIC_GFX_rc6);
+ if (gfx_info[GFX_MHz].path)
+ BIC_PRESENT(BIC_GFXMHz);
+ if (gfx_info[GFX_ACTMHz].path)
BIC_PRESENT(BIC_GFXACTMHz);
+ if (gfx_info[SAM_mc6].path)
+ BIC_PRESENT(BIC_SAM_mc6);
+ if (gfx_info[SAM_MHz].path)
+ BIC_PRESENT(BIC_SAMMHz);
+ if (gfx_info[SAM_ACTMHz].path)
+ BIC_PRESENT(BIC_SAMACTMHz);
}
static void dump_sysfs_cstate_config(void)
@@ -4783,6 +5646,9 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
UNUSED(c);
UNUSED(p);
+ if (no_msr)
+ return 0;
+
if (!has_hwp)
return 0;
@@ -4869,6 +5735,9 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
UNUSED(c);
UNUSED(p);
+ if (no_msr)
+ return 0;
+
cpu = t->cpu_id;
/* per-package */
@@ -4983,31 +5852,18 @@ void rapl_probe_intel(void)
unsigned long long msr;
unsigned int time_unit;
double tdp;
+ const unsigned long long bic_watt_bits = BIC_PkgWatt | BIC_CorWatt | BIC_RAMWatt | BIC_GFXWatt;
+ const unsigned long long bic_joules_bits = BIC_Pkg_J | BIC_Cor_J | BIC_RAM_J | BIC_GFX_J;
- if (rapl_joules) {
- if (platform->rapl_msrs & RAPL_PKG_ENERGY_STATUS)
- BIC_PRESENT(BIC_Pkg_J);
- if (platform->rapl_msrs & RAPL_CORE_ENERGY_STATUS)
- BIC_PRESENT(BIC_Cor_J);
- if (platform->rapl_msrs & RAPL_DRAM_ENERGY_STATUS)
- BIC_PRESENT(BIC_RAM_J);
- if (platform->rapl_msrs & RAPL_GFX_ENERGY_STATUS)
- BIC_PRESENT(BIC_GFX_J);
- } else {
- if (platform->rapl_msrs & RAPL_PKG_ENERGY_STATUS)
- BIC_PRESENT(BIC_PkgWatt);
- if (platform->rapl_msrs & RAPL_CORE_ENERGY_STATUS)
- BIC_PRESENT(BIC_CorWatt);
- if (platform->rapl_msrs & RAPL_DRAM_ENERGY_STATUS)
- BIC_PRESENT(BIC_RAMWatt);
- if (platform->rapl_msrs & RAPL_GFX_ENERGY_STATUS)
- BIC_PRESENT(BIC_GFXWatt);
- }
+ if (rapl_joules)
+ bic_enabled &= ~bic_watt_bits;
+ else
+ bic_enabled &= ~bic_joules_bits;
- if (platform->rapl_msrs & RAPL_PKG_PERF_STATUS)
- BIC_PRESENT(BIC_PKG__);
- if (platform->rapl_msrs & RAPL_DRAM_PERF_STATUS)
- BIC_PRESENT(BIC_RAM__);
+ if (!(platform->rapl_msrs & RAPL_PKG_PERF_STATUS))
+ bic_enabled &= ~BIC_PKG__;
+ if (!(platform->rapl_msrs & RAPL_DRAM_PERF_STATUS))
+ bic_enabled &= ~BIC_RAM__;
/* units on package 0, verify later other packages match */
if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
@@ -5041,14 +5897,13 @@ void rapl_probe_amd(void)
{
unsigned long long msr;
double tdp;
+ const unsigned long long bic_watt_bits = BIC_PkgWatt | BIC_CorWatt;
+ const unsigned long long bic_joules_bits = BIC_Pkg_J | BIC_Cor_J;
- if (rapl_joules) {
- BIC_PRESENT(BIC_Pkg_J);
- BIC_PRESENT(BIC_Cor_J);
- } else {
- BIC_PRESENT(BIC_PkgWatt);
- BIC_PRESENT(BIC_CorWatt);
- }
+ if (rapl_joules)
+ bic_enabled &= ~bic_watt_bits;
+ else
+ bic_enabled &= ~bic_joules_bits;
if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
return;
@@ -5202,7 +6057,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
*/
void probe_rapl(void)
{
- if (!platform->rapl_msrs)
+ if (!platform->rapl_msrs || no_msr)
return;
if (genuine_intel)
@@ -5258,7 +6113,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
}
/* Temperature Target MSR is Nehalem and newer only */
- if (!platform->has_nhm_msrs)
+ if (!platform->has_nhm_msrs || no_msr)
goto guess;
if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
@@ -5305,6 +6160,9 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
UNUSED(c);
UNUSED(p);
+ if (no_msr)
+ return 0;
+
if (!(do_dts || do_ptm))
return 0;
@@ -5402,6 +6260,9 @@ void decode_feature_control_msr(void)
{
unsigned long long msr;
+ if (no_msr)
+ return;
+
if (!get_msr(base_cpu, MSR_IA32_FEAT_CTL, &msr))
fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n",
base_cpu, msr, msr & FEAT_CTL_LOCKED ? "" : "UN-", msr & (1 << 18) ? "SGX" : "");
@@ -5411,6 +6272,9 @@ void decode_misc_enable_msr(void)
{
unsigned long long msr;
+ if (no_msr)
+ return;
+
if (!genuine_intel)
return;
@@ -5428,6 +6292,9 @@ void decode_misc_feature_control(void)
{
unsigned long long msr;
+ if (no_msr)
+ return;
+
if (!platform->has_msr_misc_feature_control)
return;
@@ -5449,6 +6316,9 @@ void decode_misc_pwr_mgmt_msr(void)
{
unsigned long long msr;
+ if (no_msr)
+ return;
+
if (!platform->has_msr_misc_pwr_mgmt)
return;
@@ -5468,6 +6338,9 @@ void decode_c6_demotion_policy_msr(void)
{
unsigned long long msr;
+ if (no_msr)
+ return;
+
if (!platform->has_msr_c6_demotion_policy_config)
return;
@@ -5489,7 +6362,8 @@ void print_dev_latency(void)
fd = open(path, O_RDONLY);
if (fd < 0) {
- warnx("capget(CAP_SYS_ADMIN) failed, try \"# setcap cap_sys_admin=ep %s\"", progname);
+ if (debug)
+ warnx("Read %s failed", path);
return;
}
@@ -5504,23 +6378,260 @@ void print_dev_latency(void)
close(fd);
}
+static int has_instr_count_access(void)
+{
+ int fd;
+ int has_access;
+
+ if (no_perf)
+ return 0;
+
+ fd = open_perf_counter(base_cpu, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS, -1, 0);
+ has_access = fd != -1;
+
+ if (fd != -1)
+ close(fd);
+
+ if (!has_access)
+ warnx("Failed to access %s. Some of the counters may not be available\n"
+ "\tRun as root to enable them or use %s to disable the access explicitly",
+ "instructions retired perf counter", "--no-perf");
+
+ return has_access;
+}
+
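+/* APERF/MPERF are required by the frequency, busyness and IPC columns */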
+bool is_aperf_access_required(void)
+{
+ return BIC_IS_ENABLED(BIC_Avg_MHz)
+ || BIC_IS_ENABLED(BIC_Busy)
+ || BIC_IS_ENABLED(BIC_Bzy_MHz)
+ || BIC_IS_ENABLED(BIC_IPC);
+}
+
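+/* Try to add one RAPL perf counter to the domain's group; returns the fd or -1 */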
+int add_rapl_perf_counter_(int cpu, struct rapl_counter_info_t *rci, const struct rapl_counter_arch_info *cai,
+ double *scale_, enum rapl_unit *unit_)
+{
+ if (no_perf)
+ return -1;
+
+ const double scale = read_perf_rapl_scale(cai->perf_subsys, cai->perf_name);
+
+ if (scale == 0.0)
+ return -1;
+
+ const enum rapl_unit unit = read_perf_rapl_unit(cai->perf_subsys, cai->perf_name);
+
+ if (unit == RAPL_UNIT_INVALID)
+ return -1;
+
+ const unsigned int rapl_type = read_perf_type(cai->perf_subsys);
+ const unsigned int rapl_config = read_rapl_config(cai->perf_subsys, cai->perf_name);
+
+ const int fd_counter =
+ open_perf_counter(cpu, rapl_type, rapl_config, rci->fd_perf, PERF_FORMAT_GROUP);
+ if (fd_counter == -1)
+ return -1;
+
+ /* If it's the first counter opened, make it a group descriptor */
+ if (rci->fd_perf == -1)
+ rci->fd_perf = fd_counter;
+
+ *scale_ = scale;
+ *unit_ = unit;
+ return fd_counter;
+}
+
+int add_rapl_perf_counter(int cpu, struct rapl_counter_info_t *rci, const struct rapl_counter_arch_info *cai,
+ double *scale, enum rapl_unit *unit)
+{
+ int ret = add_rapl_perf_counter_(cpu, rci, cai, scale, unit);
+
+ if (debug)
+ fprintf(stderr, "%s: %d (cpu: %d)\n", __func__, ret, cpu);
+
+ return ret;
+}
+
/*
* Linux-perf manages the HW instructions-retired counter
* by enabling when requested, and hiding rollover
*/
void linux_perf_init(void)
{
- if (!BIC_IS_ENABLED(BIC_IPC))
- return;
-
if (access("/proc/sys/kernel/perf_event_paranoid", F_OK))
return;
- fd_instr_count_percpu = calloc(topo.max_cpu_num + 1, sizeof(int));
- if (fd_instr_count_percpu == NULL)
- err(-1, "calloc fd_instr_count_percpu");
+ if (BIC_IS_ENABLED(BIC_IPC) && has_aperf) {
+ fd_instr_count_percpu = calloc(topo.max_cpu_num + 1, sizeof(int));
+ if (fd_instr_count_percpu == NULL)
+ err(-1, "calloc fd_instr_count_percpu");
+ }
+
+ const bool aperf_required = is_aperf_access_required();
+
+ if (aperf_required && has_aperf && amperf_source == AMPERF_SOURCE_PERF) {
+ fd_amperf_percpu = calloc(topo.max_cpu_num + 1, sizeof(*fd_amperf_percpu));
+ if (fd_amperf_percpu == NULL)
+ err(-1, "calloc fd_amperf_percpu");
+ }
+}
+
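+/* Probe RAPL counter access for every domain, preferring perf over MSR */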
+void rapl_perf_init(void)
+{
+ const int num_domains = platform->has_per_core_rapl ? topo.num_cores : topo.num_packages;
+ bool *domain_visited = calloc(num_domains, sizeof(bool));
+
+ if (domain_visited == NULL)
+ err(-1, "calloc domain_visited");
+
+ rapl_counter_info_perdomain = calloc(num_domains, sizeof(*rapl_counter_info_perdomain));
+ if (rapl_counter_info_perdomain == NULL)
+ err(-1, "calloc rapl_counter_info_percpu");
+
+ /*
+ * Initialize rapl_counter_info_perdomain
+ */
+ for (int domain_id = 0; domain_id < num_domains; ++domain_id) {
+ struct rapl_counter_info_t *rci = &rapl_counter_info_perdomain[domain_id];
+
+ rci->fd_perf = -1;
+ for (size_t i = 0; i < NUM_RAPL_COUNTERS; ++i) {
+ rci->data[i] = 0;
+ rci->source[i] = RAPL_SOURCE_NONE;
+ }
+ }
- BIC_PRESENT(BIC_IPC);
+ /*
+ * Open/probe the counters.
+ * If a counter cannot be opened via perf, fall back to the MSR.
+ */
+ for (size_t i = 0; i < ARRAY_SIZE(rapl_counter_arch_infos); ++i) {
+
+ const struct rapl_counter_arch_info *const cai = &rapl_counter_arch_infos[i];
+ bool has_counter = 0;
+ double scale;
+ enum rapl_unit unit;
+ int next_domain;
+
+ memset(domain_visited, 0, num_domains * sizeof(*domain_visited));
+
+ for (int cpu = 0; cpu < topo.max_cpu_num + 1; ++cpu) {
+
+ if (cpu_is_not_allowed(cpu))
+ continue;
+
+ /* Skip already seen and handled RAPL domains */
+ next_domain =
+ platform->has_per_core_rapl ? cpus[cpu].physical_core_id : cpus[cpu].physical_package_id;
+
+ if (domain_visited[next_domain])
+ continue;
+
+ domain_visited[next_domain] = 1;
+
+ struct rapl_counter_info_t *rci = &rapl_counter_info_perdomain[next_domain];
+
+ /* Check if the counter is enabled and accessible */
+ if (BIC_IS_ENABLED(cai->bic) && (platform->rapl_msrs & cai->feature_mask)) {
+
+ /* Use perf API for this counter */
+ if (!no_perf && cai->perf_name
+ && add_rapl_perf_counter(cpu, rci, cai, &scale, &unit) != -1) {
+ rci->source[cai->rci_index] = RAPL_SOURCE_PERF;
+ rci->scale[cai->rci_index] = scale * cai->compat_scale;
+ rci->unit[cai->rci_index] = unit;
+ rci->flags[cai->rci_index] = cai->flags;
+
+ /* Use MSR for this counter */
+ } else if (!no_msr && cai->msr && probe_msr(cpu, cai->msr) == 0) {
+ rci->source[cai->rci_index] = RAPL_SOURCE_MSR;
+ rci->msr[cai->rci_index] = cai->msr;
+ rci->msr_mask[cai->rci_index] = cai->msr_mask;
+ rci->msr_shift[cai->rci_index] = cai->msr_shift;
+ rci->unit[cai->rci_index] = RAPL_UNIT_JOULES;
+ rci->scale[cai->rci_index] = *cai->platform_rapl_msr_scale * cai->compat_scale;
+ rci->flags[cai->rci_index] = cai->flags;
+ }
+ }
+
+ if (rci->source[cai->rci_index] != RAPL_SOURCE_NONE)
+ has_counter = 1;
+ }
+
+ /* If any CPU has access to the counter, make it present */
+ if (has_counter)
+ BIC_PRESENT(cai->bic);
+ }
+
+ free(domain_visited);
+}
+
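+/* Check whether APERF and MPERF are readable through the MSR driver */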
+static int has_amperf_access_via_msr(void)
+{
+ if (no_msr)
+ return 0;
+
+ if (probe_msr(base_cpu, MSR_IA32_APERF))
+ return 0;
+
+ if (probe_msr(base_cpu, MSR_IA32_MPERF))
+ return 0;
+
+ return 1;
+}
+
+static int has_amperf_access_via_perf(void)
+{
+ struct amperf_group_fd fds;
+
+ /*
+ * Cache the last result, so we don't warn the user multiple times
+ *
+ * Negative means cached, no access
+ * Zero means not cached
+ * Positive means cached, has access
+ */
+ static int has_access_cached;
+
+ if (no_perf)
+ return 0;
+
+ if (has_access_cached != 0)
+ return has_access_cached > 0;
+
+ fds = open_amperf_fd(base_cpu);
+ has_access_cached = (fds.aperf != -1) && (fds.mperf != -1);
+
+ if (fds.aperf == -1)
+ warnx("Failed to access %s. Some of the counters may not be available\n"
+ "\tRun as root to enable them or use %s to disable the access explicitly",
+ "APERF perf counter", "--no-perf");
+ else
+ close(fds.aperf);
+
+ if (fds.mperf == -1)
+ warnx("Failed to access %s. Some of the counters may not be available\n"
+ "\tRun as root to enable them or use %s to disable the access explicitly",
+ "MPERF perf counter", "--no-perf");
+ else
+ close(fds.mperf);
+
+ if (has_access_cached == 0)
+ has_access_cached = -1;
+
+ return has_access_cached > 0;
+}
+
+/* Check if we can access APERF and MPERF */
+static int has_amperf_access(void)
+{
+ if (!is_aperf_access_required())
+ return 0;
+
+ if (!no_msr && has_amperf_access_via_msr())
+ return 1;
+
+ if (!no_perf && has_amperf_access_via_perf())
+ return 1;
+
+ return 0;
}
void probe_cstates(void)
@@ -5563,7 +6674,7 @@ void probe_cstates(void)
if (platform->has_msr_module_c6_res_ms)
BIC_PRESENT(BIC_Mod_c6);
- if (platform->has_ext_cst_msrs) {
+ if (platform->has_ext_cst_msrs && !no_msr) {
BIC_PRESENT(BIC_Totl_c0);
BIC_PRESENT(BIC_Any_c0);
BIC_PRESENT(BIC_GFX_c0);
@@ -5623,6 +6734,7 @@ void process_cpuid()
unsigned int eax, ebx, ecx, edx;
unsigned int fms, family, model, stepping, ecx_flags, edx_flags;
unsigned long long ucode_patch = 0;
+ bool ucode_patch_valid = false;
eax = ebx = ecx = edx = 0;
@@ -5650,8 +6762,12 @@ void process_cpuid()
ecx_flags = ecx;
edx_flags = edx;
- if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch))
- warnx("get_msr(UCODE)");
+ if (!no_msr) {
+ if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch))
+ warnx("get_msr(UCODE)");
+ else
+ ucode_patch_valid = true;
+ }
/*
* check max extended function levels of CPUID.
@@ -5662,9 +6778,12 @@ void process_cpuid()
__cpuid(0x80000000, max_extended_level, ebx, ecx, edx);
if (!quiet) {
- fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d) microcode 0x%x\n",
- family, model, stepping, family, model, stepping,
- (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF));
+ fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d)",
+ family, model, stepping, family, model, stepping);
+ if (ucode_patch_valid)
+ fprintf(outf, " microcode 0x%x", (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF));
+ fputc('\n', outf);
+
fprintf(outf, "CPUID(0x80000000): max_extended_levels: 0x%x\n", max_extended_level);
fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
ecx_flags & (1 << 0) ? "SSE3" : "-",
@@ -5700,10 +6819,11 @@ void process_cpuid()
__cpuid(0x6, eax, ebx, ecx, edx);
has_aperf = ecx & (1 << 0);
- if (has_aperf) {
+ if (has_aperf && has_amperf_access()) {
BIC_PRESENT(BIC_Avg_MHz);
BIC_PRESENT(BIC_Busy);
BIC_PRESENT(BIC_Bzy_MHz);
+ BIC_PRESENT(BIC_IPC);
}
do_dts = eax & (1 << 0);
if (do_dts)
@@ -5786,6 +6906,15 @@ void process_cpuid()
base_mhz = max_mhz = bus_mhz = edx = 0;
__cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx);
+
+ bclk = bus_mhz;
+
+ base_hz = base_mhz * 1000000;
+ has_base_hz = 1;
+
+ if (platform->enable_tsc_tweak)
+ tsc_tweak = base_hz / tsc_hz;
+
if (!quiet)
fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n",
base_mhz, max_mhz, bus_mhz);
@@ -5814,7 +6943,7 @@ void probe_pm_features(void)
probe_thermal();
- if (platform->has_nhm_msrs)
+ if (platform->has_nhm_msrs && !no_msr)
BIC_PRESENT(BIC_SMI);
if (!quiet)
@@ -6142,6 +7271,7 @@ void topology_update(void)
topo.allowed_packages = 0;
for_all_cpus(update_topo, ODD_COUNTERS);
}
+
void setup_all_buffers(bool startup)
{
topology_probe(startup);
@@ -6169,21 +7299,129 @@ void set_base_cpu(void)
err(-ENODEV, "No valid cpus found");
}
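+/* Prefer perf for APERF/MPERF and fall back to MSR when perf access is unavailable */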
+static void set_amperf_source(void)
+{
+ amperf_source = AMPERF_SOURCE_PERF;
+
+ const bool aperf_required = is_aperf_access_required();
+
+ if (no_perf || !aperf_required || !has_amperf_access_via_perf())
+ amperf_source = AMPERF_SOURCE_MSR;
+
+ if (quiet || !debug)
+ return;
+
+ fprintf(outf, "aperf/mperf source preference: %s\n", amperf_source == AMPERF_SOURCE_MSR ? "msr" : "perf");
+}
+
+bool has_added_counters(void)
+{
+ /*
+ * It only makes sense to call this after the command line is parsed,
+ * otherwise sys structure is not populated.
+ */
+
+ return sys.added_core_counters | sys.added_thread_counters | sys.added_package_counters;
+}
+
+bool is_msr_access_required(void)
+{
+ if (no_msr)
+ return false;
+
+ if (has_added_counters())
+ return true;
+
+ return BIC_IS_ENABLED(BIC_SMI)
+ || BIC_IS_ENABLED(BIC_CPU_c1)
+ || BIC_IS_ENABLED(BIC_CPU_c3)
+ || BIC_IS_ENABLED(BIC_CPU_c6)
+ || BIC_IS_ENABLED(BIC_CPU_c7)
+ || BIC_IS_ENABLED(BIC_Mod_c6)
+ || BIC_IS_ENABLED(BIC_CoreTmp)
+ || BIC_IS_ENABLED(BIC_Totl_c0)
+ || BIC_IS_ENABLED(BIC_Any_c0)
+ || BIC_IS_ENABLED(BIC_GFX_c0)
+ || BIC_IS_ENABLED(BIC_CPUGFX)
+ || BIC_IS_ENABLED(BIC_Pkgpc3)
+ || BIC_IS_ENABLED(BIC_Pkgpc6)
+ || BIC_IS_ENABLED(BIC_Pkgpc2)
+ || BIC_IS_ENABLED(BIC_Pkgpc7)
+ || BIC_IS_ENABLED(BIC_Pkgpc8)
+ || BIC_IS_ENABLED(BIC_Pkgpc9)
+ || BIC_IS_ENABLED(BIC_Pkgpc10)
+ /* TODO: Multiplex access with perf */
+ || BIC_IS_ENABLED(BIC_PkgWatt)
+ || BIC_IS_ENABLED(BIC_CorWatt)
+ || BIC_IS_ENABLED(BIC_GFXWatt)
+ || BIC_IS_ENABLED(BIC_RAMWatt)
+ || BIC_IS_ENABLED(BIC_Pkg_J)
+ || BIC_IS_ENABLED(BIC_Cor_J)
+ || BIC_IS_ENABLED(BIC_GFX_J)
+ || BIC_IS_ENABLED(BIC_RAM_J)
+ || BIC_IS_ENABLED(BIC_PKG__)
+ || BIC_IS_ENABLED(BIC_RAM__)
+ || BIC_IS_ENABLED(BIC_PkgTmp)
+ || (is_aperf_access_required() && !has_amperf_access_via_perf());
+}
+
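+/* Disable MSR access when no enabled counter actually requires it */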
+void check_msr_access(void)
+{
+ if (!is_msr_access_required())
+ no_msr = 1;
+
+ check_dev_msr();
+ check_msr_permission();
+
+ if (no_msr)
+ bic_disable_msr_access();
+}
+
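+/* Disable the columns whose perf or APERF/MPERF prerequisites are missing */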
+void check_perf_access(void)
+{
+ const bool intrcount_required = BIC_IS_ENABLED(BIC_IPC);
+
+ if (no_perf || !intrcount_required || !has_instr_count_access())
+ bic_enabled &= ~BIC_IPC;
+
+ const bool aperf_required = is_aperf_access_required();
+
+ if (!aperf_required || !has_amperf_access()) {
+ bic_enabled &= ~BIC_Avg_MHz;
+ bic_enabled &= ~BIC_Busy;
+ bic_enabled &= ~BIC_Bzy_MHz;
+ bic_enabled &= ~BIC_IPC;
+ }
+}
+
void turbostat_init()
{
setup_all_buffers(true);
set_base_cpu();
- check_dev_msr();
- check_permissions();
+ check_msr_access();
+ check_perf_access();
process_cpuid();
probe_pm_features();
+ set_amperf_source();
linux_perf_init();
+ rapl_perf_init();
for_all_cpus(get_cpu_type, ODD_COUNTERS);
for_all_cpus(get_cpu_type, EVEN_COUNTERS);
if (DO_BIC(BIC_IPC))
(void)get_instr_count_fd(base_cpu);
+
+ /*
+ * If TSC tweak is needed, but couldn't get it,
+ * disable more BICs, since it can't be reported accurately.
+ */
+ if (platform->enable_tsc_tweak && !has_base_hz) {
+ bic_enabled &= ~BIC_Busy;
+ bic_enabled &= ~BIC_Bzy_MHz;
+ }
}
int fork_it(char **argv)
@@ -6259,7 +7497,7 @@ int get_and_dump_counters(void)
void print_version()
{
- fprintf(outf, "turbostat version 2023.11.07 - Len Brown <lenb@kernel.org>\n");
+ fprintf(outf, "turbostat version 2024.04.08 - Len Brown <lenb@kernel.org>\n");
}
#define COMMAND_LINE_SIZE 2048
@@ -6291,6 +7529,9 @@ int add_counter(unsigned int msr_num, char *path, char *name,
{
struct msr_counter *msrp;
+ if (no_msr && msr_num)
+ errx(1, "Requested MSR counter 0x%x, but in --no-msr mode", msr_num);
+
msrp = calloc(1, sizeof(struct msr_counter));
if (msrp == NULL) {
perror("calloc");
@@ -6595,6 +7836,8 @@ void cmdline(int argc, char **argv)
{ "list", no_argument, 0, 'l' },
{ "out", required_argument, 0, 'o' },
{ "quiet", no_argument, 0, 'q' },
+ { "no-msr", no_argument, 0, 'M' },
+ { "no-perf", no_argument, 0, 'P' },
{ "show", required_argument, 0, 's' },
{ "Summary", no_argument, 0, 'S' },
{ "TCC", required_argument, 0, 'T' },
@@ -6604,7 +7847,25 @@ void cmdline(int argc, char **argv)
progname = argv[0];
- while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:o:qST:v", long_options, &option_index)) != -1) {
+ /*
+ * Parse some options early, because they may make other options invalid,
+ * like adding the MSR counter with --add and at the same time using --no-msr.
+ */
+ while ((opt = getopt_long_only(argc, argv, "MP", long_options, &option_index)) != -1) {
+ switch (opt) {
+ case 'M':
+ no_msr = 1;
+ break;
+ case 'P':
+ no_perf = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ optind = 0;
+
+ while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:o:qMPST:v", long_options, &option_index)) != -1) {
switch (opt) {
case 'a':
parse_add_command(optarg);
@@ -6662,6 +7923,10 @@ void cmdline(int argc, char **argv)
case 'q':
quiet = 1;
break;
+ case 'M':
+ case 'P':
+ /* Parsed earlier */
+ break;
case 'n':
num_iterations = strtod(optarg, NULL);
@@ -6704,6 +7969,22 @@ void cmdline(int argc, char **argv)
}
}
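+/* The per-cpu perf and MSR fds can exceed the default open-file limit; raise RLIMIT_NOFILE */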
+void set_rlimit(void)
+{
+ struct rlimit limit;
+
+ if (getrlimit(RLIMIT_NOFILE, &limit) < 0)
+ err(1, "Failed to get rlimit");
+
+ if (limit.rlim_max < MAX_NOFILE)
+ limit.rlim_max = MAX_NOFILE;
+ if (limit.rlim_cur < MAX_NOFILE)
+ limit.rlim_cur = MAX_NOFILE;
+
+ if (setrlimit(RLIMIT_NOFILE, &limit) < 0)
+ err(1, "Failed to set rlimit");
+}
+
int main(int argc, char **argv)
{
int fd, ret;
@@ -6729,9 +8010,13 @@ skip_cgroup_setting:
probe_sysfs();
+ if (!getuid())
+ set_rlimit();
+
turbostat_init();
- msr_sum_record();
+ if (!no_msr)
+ msr_sum_record();
/* dump counters and exit */
if (dump_only)
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index 908e0d083936..61c69297e797 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -986,10 +986,12 @@ static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
{
dpa_perf->qos_class = FAKE_QTG_ID;
dpa_perf->dpa_range = *range;
- dpa_perf->coord.read_latency = 500;
- dpa_perf->coord.write_latency = 500;
- dpa_perf->coord.read_bandwidth = 1000;
- dpa_perf->coord.write_bandwidth = 1000;
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ dpa_perf->coord[i].read_latency = 500;
+ dpa_perf->coord[i].write_latency = 500;
+ dpa_perf->coord[i].read_bandwidth = 1000;
+ dpa_perf->coord[i].write_bandwidth = 1000;
+ }
}
static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
index aa5ec149f96c..b3b00269a52a 100644
--- a/tools/testing/kunit/configs/all_tests.config
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -28,6 +28,8 @@ CONFIG_MCTP_FLOWS=y
CONFIG_INET=y
CONFIG_MPTCP=y
+CONFIG_NETDEVICES=y
+CONFIG_WLAN=y
CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_WLAN_VENDOR_INTEL=y
@@ -38,6 +40,7 @@ CONFIG_DAMON_VADDR=y
CONFIG_DAMON_PADDR=y
CONFIG_DEBUG_FS=y
CONFIG_DAMON_DBGFS=y
+CONFIG_DAMON_DBGFS_DEPRECATED=y
CONFIG_REGMAP_BUILD=y
diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h
index bcf195c64a45..567491f3e1b5 100644
--- a/tools/testing/selftests/bpf/bpf_arena_common.h
+++ b/tools/testing/selftests/bpf/bpf_arena_common.h
@@ -32,7 +32,7 @@
*/
#endif
-#if defined(__BPF_FEATURE_ARENA_CAST) && !defined(BPF_ARENA_FORCE_ASM)
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
#define __arena __attribute__((address_space(1)))
#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_htab.c b/tools/testing/selftests/bpf/prog_tests/arena_htab.c
index 0766702de846..d69fd2465f53 100644
--- a/tools/testing/selftests/bpf/prog_tests/arena_htab.c
+++ b/tools/testing/selftests/bpf/prog_tests/arena_htab.c
@@ -3,12 +3,14 @@
#include <test_progs.h>
#include <sys/mman.h>
#include <network_helpers.h>
-
+#include <sys/user.h>
+#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */
+#include <unistd.h>
+#define PAGE_SIZE getpagesize()
+#endif
#include "arena_htab_asm.skel.h"
#include "arena_htab.skel.h"
-#define PAGE_SIZE 4096
-
#include "bpf_arena_htab.h"
static void test_arena_htab_common(struct htab *htab)
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_list.c b/tools/testing/selftests/bpf/prog_tests/arena_list.c
index e61886debab1..d15867cddde0 100644
--- a/tools/testing/selftests/bpf/prog_tests/arena_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/arena_list.c
@@ -3,8 +3,11 @@
#include <test_progs.h>
#include <sys/mman.h>
#include <network_helpers.h>
-
-#define PAGE_SIZE 4096
+#include <sys/user.h>
+#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */
+#include <unistd.h>
+#define PAGE_SIZE getpagesize()
+#endif
#include "bpf_arena_list.h"
#include "arena_list.skel.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
index 053f4d6da77a..cc184e4420f6 100644
--- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <sys/syscall.h>
+#include <limits.h>
#include <test_progs.h>
#include "bloom_filter_map.skel.h"
@@ -21,6 +22,11 @@ static void test_fail_cases(void)
if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value size 0"))
close(fd);
+ /* Invalid value size: too big */
+ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, INT32_MAX, 100, NULL);
+ if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value too large"))
+ close(fd);
+
/* Invalid max entries size */
fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 0, NULL);
if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid max entries size"))
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 985273832f89..c4f9f306646e 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -5,6 +5,7 @@
#include "cap_helpers.h"
#include "verifier_and.skel.h"
#include "verifier_arena.skel.h"
+#include "verifier_arena_large.skel.h"
#include "verifier_array_access.skel.h"
#include "verifier_basic_stack.skel.h"
#include "verifier_bitfield_write.skel.h"
@@ -120,6 +121,7 @@ static void run_tests_aux(const char *skel_name,
void test_verifier_and(void) { RUN(verifier_and); }
void test_verifier_arena(void) { RUN(verifier_arena); }
+void test_verifier_arena_large(void) { RUN(verifier_arena_large); }
void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); }
void test_verifier_bitfield_write(void) { RUN(verifier_bitfield_write); }
void test_verifier_bounds(void) { RUN(verifier_bounds); }
diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c
index b7bb712cacfd..1e6ac187a6a0 100644
--- a/tools/testing/selftests/bpf/progs/arena_htab.c
+++ b/tools/testing/selftests/bpf/progs/arena_htab.c
@@ -22,7 +22,7 @@ int zero = 0;
SEC("syscall")
int arena_htab_llvm(void *ctx)
{
-#if defined(__BPF_FEATURE_ARENA_CAST) || defined(BPF_ARENA_FORCE_ASM)
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM)
struct htab __arena *htab;
__u64 i;
diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c
index cd35b8448435..c0422c58cee2 100644
--- a/tools/testing/selftests/bpf/progs/arena_list.c
+++ b/tools/testing/selftests/bpf/progs/arena_list.c
@@ -30,13 +30,13 @@ int list_sum;
int cnt;
bool skip = false;
-#ifdef __BPF_FEATURE_ARENA_CAST
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
long __arena arena_sum;
int __arena test_val = 1;
struct arena_list_head __arena global_head;
#else
-long arena_sum SEC(".arena.1");
-int test_val SEC(".arena.1");
+long arena_sum SEC(".addr_space.1");
+int test_val SEC(".addr_space.1");
#endif
int zero;
@@ -44,7 +44,7 @@ int zero;
SEC("syscall")
int arena_list_add(void *ctx)
{
-#ifdef __BPF_FEATURE_ARENA_CAST
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
__u64 i;
list_head = &global_head;
@@ -66,7 +66,7 @@ int arena_list_add(void *ctx)
SEC("syscall")
int arena_list_del(void *ctx)
{
-#ifdef __BPF_FEATURE_ARENA_CAST
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
struct elem __arena *n;
int sum = 0;
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c
index 5540b05ff9ee..93144ae6df74 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena.c
@@ -12,14 +12,18 @@ struct {
__uint(type, BPF_MAP_TYPE_ARENA);
__uint(map_flags, BPF_F_MMAPABLE);
__uint(max_entries, 2); /* arena of two pages close to 32-bit boundary*/
- __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+#else
+ __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+#endif
} arena SEC(".maps");
SEC("syscall")
__success __retval(0)
int basic_alloc1(void *ctx)
{
-#if defined(__BPF_FEATURE_ARENA_CAST)
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
volatile int __arena *page1, *page2, *no_page, *page3;
page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
@@ -58,7 +62,7 @@ SEC("syscall")
__success __retval(0)
int basic_alloc2(void *ctx)
{
-#if defined(__BPF_FEATURE_ARENA_CAST)
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
volatile char __arena *page1, *page2, *page3, *page4;
page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
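The map_extra hint places the arena so its two pages sit immediately below a 32-bit boundary: ~0u - __PAGE_SIZE * 2 + 1 is the lowest address whose two following pages end exactly at 4 GiB, and the OR'd high bit (1<<44, or 1<<32 under __TARGET_ARCH_arm64, where the test needs a hint that fits the configured user VA size) selects which region the mmap() lands in. A quick arithmetic check of those constants, assuming 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 4096ull		/* assumption: 4 KiB pages */

    int main(void)
    {
    	/* Lowest offset below 4 GiB leaving room for exactly two pages. */
    	uint64_t low  = ~0u - PAGE_SZ * 2 + 1;	/* 0xffffe000 */
    	uint64_t hint = (1ull << 44) | low;

    	printf("page 1 at %#llx\n", (unsigned long long)hint);
    	printf("page 2 at %#llx\n", (unsigned long long)(hint + PAGE_SZ));
    	printf("end      %#llx  <- low 32 bits wrap to zero here\n",
    	       (unsigned long long)(hint + 2 * PAGE_SZ));
    	return 0;
    }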
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
new file mode 100644
index 000000000000..ef66ea460264
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+#define ARENA_SIZE (1ull << 32)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, ARENA_SIZE / PAGE_SIZE);
+} arena SEC(".maps");
+
+SEC("syscall")
+__success __retval(0)
+int big_alloc1(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ volatile char __arena *page1, *page2, *no_page, *page3;
+ void __arena *base;
+
+ page1 = base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page1)
+ return 1;
+ *page1 = 1;
+ page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
+ 1, NUMA_NO_NODE, 0);
+ if (!page2)
+ return 2;
+ *page2 = 2;
+ no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE,
+ 1, NUMA_NO_NODE, 0);
+ if (no_page)
+ return 3;
+ if (*page1 != 1)
+ return 4;
+ if (*page2 != 2)
+ return 5;
+ bpf_arena_free_pages(&arena, (void __arena *)page1, 1);
+ if (*page2 != 2)
+ return 6;
+ if (*page1 != 0) /* use-after-free should return 0 */
+ return 7;
+ page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page3)
+ return 8;
+ *page3 = 3;
+ if (page1 != page3)
+ return 9;
+ if (*page2 != 2)
+ return 10;
+ if (*(page1 + PAGE_SIZE) != 0)
+ return 11;
+ if (*(page1 - PAGE_SIZE) != 0)
+ return 12;
+ if (*(page2 + PAGE_SIZE) != 0)
+ return 13;
+ if (*(page2 - PAGE_SIZE) != 0)
+ return 14;
+#endif
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/dmabuf-heaps/config b/tools/testing/selftests/dmabuf-heaps/config
new file mode 100644
index 000000000000..be091f1cdfa0
--- /dev/null
+++ b/tools/testing/selftests/dmabuf-heaps/config
@@ -0,0 +1,3 @@
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_DRM_VGEM=y
diff --git a/tools/testing/selftests/drivers/net/netdevsim/settings b/tools/testing/selftests/drivers/net/netdevsim/settings
new file mode 100644
index 000000000000..a62d2fa1275c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/settings
@@ -0,0 +1 @@
+timeout=600
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index a0b8688b0836..fb4472ddffd8 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -19,8 +19,8 @@ include ../lib.mk
$(OUTPUT)/subdir:
mkdir -p $@
-$(OUTPUT)/script:
- echo '#!/bin/sh' > $@
+$(OUTPUT)/script: Makefile
+ echo '#!/bin/bash' > $@
echo 'exit $$*' >> $@
chmod +x $@
$(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat
diff --git a/tools/testing/selftests/exec/binfmt_script.py b/tools/testing/selftests/exec/binfmt_script.py
index 05f94a741c7a..2c575a2c0eab 100755
--- a/tools/testing/selftests/exec/binfmt_script.py
+++ b/tools/testing/selftests/exec/binfmt_script.py
@@ -16,6 +16,8 @@ SIZE=256
NAME_MAX=int(subprocess.check_output(["getconf", "NAME_MAX", "."]))
test_num=0
+pass_num=0
+fail_num=0
code='''#!/usr/bin/perl
print "Executed interpreter! Args:\n";
@@ -42,7 +44,7 @@ foreach my $a (@ARGV) {
# ...
def test(name, size, good=True, leading="", root="./", target="/perl",
fill="A", arg="", newline="\n", hashbang="#!"):
- global test_num, tests, NAME_MAX
+ global test_num, pass_num, fail_num, tests, NAME_MAX
test_num += 1
if test_num > tests:
raise ValueError("more binfmt_script tests than expected! (want %d, expected %d)"
@@ -80,16 +82,20 @@ def test(name, size, good=True, leading="", root="./", target="/perl",
if good:
print("ok %d - binfmt_script %s (successful good exec)"
% (test_num, name))
+ pass_num += 1
else:
print("not ok %d - binfmt_script %s succeeded when it should have failed"
% (test_num, name))
+ fail_num += 1
else:
if good:
print("not ok %d - binfmt_script %s failed when it should have succeeded (rc:%d)"
% (test_num, name, proc.returncode))
+ fail_num += 1
else:
print("ok %d - binfmt_script %s (correctly failed bad exec)"
% (test_num, name))
+ pass_num += 1
# Clean up crazy binaries
os.unlink(script)
@@ -166,6 +172,8 @@ test(name="two-under-trunc-arg", size=int(SIZE/2), arg=" ")
test(name="two-under-leading", size=int(SIZE/2), leading=" ")
test(name="two-under-lead-trunc-arg", size=int(SIZE/2), leading=" ", arg=" ")
+print("# Totals: pass:%d fail:%d xfail:0 xpass:0 skip:0 error:0" % (pass_num, fail_num))
+
if test_num != tests:
raise ValueError("fewer binfmt_script tests than expected! (ran %d, expected %d"
% (test_num, tests))
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index 0546ca24f2b2..6418ded40bdd 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -98,10 +98,9 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
if (child == 0) {
/* Child: do execveat(). */
rc = execveat_(fd, path, argv, envp, flags);
- ksft_print_msg("execveat() failed, rc=%d errno=%d (%s)\n",
+ ksft_print_msg("child execveat() failed, rc=%d errno=%d (%s)\n",
rc, errno, strerror(errno));
- ksft_test_result_fail("%s\n", test_name);
- exit(1); /* should not reach here */
+ exit(errno);
}
/* Parent: wait for & check child's exit status. */
rc = waitpid(child, &status, 0);
@@ -226,11 +225,14 @@ static int check_execveat_pathmax(int root_dfd, const char *src, int is_script)
* "If the command name is found, but it is not an executable utility,
* the exit status shall be 126."), so allow either.
*/
- if (is_script)
+ if (is_script) {
+ ksft_print_msg("Invoke script via root_dfd and relative filename\n");
fail += check_execveat_invoked_rc(root_dfd, longpath + 1, 0,
127, 126);
- else
+ } else {
+ ksft_print_msg("Invoke exec via root_dfd and relative filename\n");
fail += check_execveat(root_dfd, longpath + 1, 0);
+ }
return fail;
}
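Rather than printing a TAP failure from the child (which duplicated the parent's accounting), the child now encodes errno in its exit status for the parent to inspect via WEXITSTATUS(). A minimal sketch of that protocol, assuming the errno fits in the 8 bits an exit status carries (common values like ENOENT do):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
    	int status;
    	pid_t child = fork();

    	if (child == 0) {
    		/* Child: on exec failure, report errno via exit status. */
    		execl("/nonexistent", "nonexistent", (char *)NULL);
    		_exit(errno);	/* only reached if execl() failed */
    	}

    	waitpid(child, &status, 0);
    	if (WIFEXITED(status) && WEXITSTATUS(status))
    		printf("child exec failed: %s\n",
    		       strerror(WEXITSTATUS(status)));
    	return 0;
    }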
diff --git a/tools/testing/selftests/exec/load_address.c b/tools/testing/selftests/exec/load_address.c
index d487c2f6a615..17e3207d34ae 100644
--- a/tools/testing/selftests/exec/load_address.c
+++ b/tools/testing/selftests/exec/load_address.c
@@ -5,6 +5,7 @@
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
+#include "../kselftest.h"
struct Statistics {
unsigned long long load_address;
@@ -41,28 +42,23 @@ int main(int argc, char **argv)
unsigned long long misalign;
int ret;
+ ksft_print_header();
+ ksft_set_plan(1);
+
ret = dl_iterate_phdr(ExtractStatistics, &extracted);
- if (ret != 1) {
- fprintf(stderr, "FAILED\n");
- return 1;
- }
+ if (ret != 1)
+ ksft_exit_fail_msg("FAILED: dl_iterate_phdr\n");
- if (extracted.alignment == 0) {
- fprintf(stderr, "No alignment found\n");
- return 1;
- } else if (extracted.alignment & (extracted.alignment - 1)) {
- fprintf(stderr, "Alignment is not a power of 2\n");
- return 1;
- }
+ if (extracted.alignment == 0)
+ ksft_exit_fail_msg("FAILED: No alignment found\n");
+ else if (extracted.alignment & (extracted.alignment - 1))
+ ksft_exit_fail_msg("FAILED: Alignment is not a power of 2\n");
misalign = extracted.load_address & (extracted.alignment - 1);
- if (misalign) {
- printf("alignment = %llu, load_address = %llu\n",
- extracted.alignment, extracted.load_address);
- fprintf(stderr, "FAILED\n");
- return 1;
- }
+ if (misalign)
+ ksft_exit_fail_msg("FAILED: alignment = %llu, load_address = %llu\n",
+ extracted.alignment, extracted.load_address);
- fprintf(stderr, "PASS\n");
- return 0;
+ ksft_test_result_pass("Completed\n");
+ ksft_finished();
}
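This conversion (recursion-depth.c below gets the same treatment) moves the test from ad-hoc stderr prints and magic return codes to the kselftest TAP helpers, so the runner can parse a plan and per-test results. The minimal skeleton such a test follows, using only helpers present in kselftest.h:

    #include "../kselftest.h"	/* path as seen from a selftest subdir */

    int main(void)
    {
    	ksft_print_header();	/* TAP version line */
    	ksft_set_plan(1);	/* announces "1..1" */

    	if (1 + 1 != 2)		/* stand-in for the real check */
    		ksft_exit_fail_msg("FAILED: arithmetic is broken\n");

    	ksft_test_result_pass("Completed\n");
    	ksft_finished();	/* prints totals and exits */
    }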
diff --git a/tools/testing/selftests/exec/recursion-depth.c b/tools/testing/selftests/exec/recursion-depth.c
index 2dbd5bc45b3e..b2f37d86a5f6 100644
--- a/tools/testing/selftests/exec/recursion-depth.c
+++ b/tools/testing/selftests/exec/recursion-depth.c
@@ -23,45 +23,44 @@
#include <fcntl.h>
#include <sys/mount.h>
#include <unistd.h>
+#include "../kselftest.h"
int main(void)
{
+ int fd, rv;
+
+ ksft_print_header();
+ ksft_set_plan(1);
+
if (unshare(CLONE_NEWNS) == -1) {
if (errno == ENOSYS || errno == EPERM) {
- fprintf(stderr, "error: unshare, errno %d\n", errno);
- return 4;
+ ksft_test_result_skip("error: unshare, errno %d\n", errno);
+ ksft_finished();
}
- fprintf(stderr, "error: unshare, errno %d\n", errno);
- return 1;
- }
- if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
- fprintf(stderr, "error: mount '/', errno %d\n", errno);
- return 1;
+ ksft_exit_fail_msg("error: unshare, errno %d\n", errno);
}
+
+ if (mount(NULL, "/", NULL, MS_PRIVATE | MS_REC, NULL) == -1)
+ ksft_exit_fail_msg("error: mount '/', errno %d\n", errno);
+
/* Require "exec" filesystem. */
- if (mount(NULL, "/tmp", "ramfs", 0, NULL) == -1) {
- fprintf(stderr, "error: mount ramfs, errno %d\n", errno);
- return 1;
- }
+ if (mount(NULL, "/tmp", "ramfs", 0, NULL) == -1)
+ ksft_exit_fail_msg("error: mount ramfs, errno %d\n", errno);
#define FILENAME "/tmp/1"
- int fd = creat(FILENAME, 0700);
- if (fd == -1) {
- fprintf(stderr, "error: creat, errno %d\n", errno);
- return 1;
- }
+ fd = creat(FILENAME, 0700);
+ if (fd == -1)
+ ksft_exit_fail_msg("error: creat, errno %d\n", errno);
+
#define S "#!" FILENAME "\n"
- if (write(fd, S, strlen(S)) != strlen(S)) {
- fprintf(stderr, "error: write, errno %d\n", errno);
- return 1;
- }
+ if (write(fd, S, strlen(S)) != strlen(S))
+ ksft_exit_fail_msg("error: write, errno %d\n", errno);
+
close(fd);
- int rv = execve(FILENAME, NULL, NULL);
- if (rv == -1 && errno == ELOOP) {
- return 0;
- }
- fprintf(stderr, "error: execve, rv %d, errno %d\n", rv, errno);
- return 1;
+ rv = execve(FILENAME, NULL, NULL);
+ ksft_test_result(rv == -1 && errno == ELOOP,
+ "execve failed as expected (ret %d, errno %d)\n", rv, errno);
+ ksft_finished();
}
diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
index b1ede6249866..b7c8f29c09a9 100644
--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
@@ -18,7 +18,7 @@ echo 'sched:*' > set_event
yield
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
@@ -29,7 +29,7 @@ echo 1 > events/sched/enable
yield
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
@@ -40,7 +40,7 @@ echo 0 > events/sched/enable
yield
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
if [ $count -ne 0 ]; then
fail "any of scheduler events should not be recorded"
fi
diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
index 2de7c61d1ae3..3f74c09c56b6 100644
--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
@@ -24,7 +24,7 @@ echo 0 > events/enable
echo "Get the most frequently calling function"
sample_events
-target_func=`cut -d: -f3 trace | sed 's/call_site=\([^+]*\)+0x.*/\1/' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
+target_func=`cat trace | grep -o 'call_site=\([^+]*\)' | sed 's/call_site=//' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
if [ -z "$target_func" ]; then
exit_fail
fi
diff --git a/tools/testing/selftests/iommu/config b/tools/testing/selftests/iommu/config
index 110d73917615..02a2a1b267c1 100644
--- a/tools/testing/selftests/iommu/config
+++ b/tools/testing/selftests/iommu/config
@@ -1,3 +1,5 @@
CONFIG_IOMMUFD=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAULT_INJECTION=y
CONFIG_IOMMUFD_TEST=y
+CONFIG_FAILSLAB=y
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index 541bf192e30e..14bbab0cce13 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -51,6 +51,7 @@
#include <stdarg.h>
#include <string.h>
#include <stdio.h>
+#include <sys/utsname.h>
#endif
#ifndef ARRAY_SIZE
@@ -79,6 +80,9 @@
#define KSFT_XPASS 3
#define KSFT_SKIP 4
+#ifndef __noreturn
+#define __noreturn __attribute__((__noreturn__))
+#endif
#define __printf(a, b) __attribute__((format(printf, a, b)))
/* counters */
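The __noreturn fallback exists so the exit helpers below can be annotated: once the compiler knows a function never returns, callers need no unreachable return statements to satisfy -Wreturn-type, and diagnostics for code after the call improve. In isolation:

    #include <stdio.h>
    #include <stdlib.h>

    #ifndef __noreturn
    #define __noreturn __attribute__((__noreturn__))
    #endif

    static __noreturn void die(const char *msg)
    {
    	fprintf(stderr, "%s\n", msg);
    	exit(1);
    }

    static int sign(int v)
    {
    	if (v > 0)
    		return 1;
    	if (v < 0)
    		return -1;
    	die("zero not allowed");
    	/* no trailing return needed: the compiler knows die() exits */
    }

    int main(void)
    {
    	return sign(3) - 1;
    }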
@@ -288,24 +292,26 @@ void ksft_test_result_code(int exit_code, const char *test_name,
}
/* Docs seem to call for double space if directive is absent */
- if (!directive[0] && msg[0])
+ if (!directive[0] && msg)
directive = " # ";
- va_start(args, msg);
printf("%s %u %s%s", tap_code, ksft_test_num(), test_name, directive);
errno = saved_errno;
- vprintf(msg, args);
+ if (msg) {
+ va_start(args, msg);
+ vprintf(msg, args);
+ va_end(args);
+ }
printf("\n");
- va_end(args);
}
-static inline int ksft_exit_pass(void)
+static inline __noreturn int ksft_exit_pass(void)
{
ksft_print_cnts();
exit(KSFT_PASS);
}
-static inline int ksft_exit_fail(void)
+static inline __noreturn int ksft_exit_fail(void)
{
ksft_print_cnts();
exit(KSFT_FAIL);
@@ -332,7 +338,7 @@ static inline int ksft_exit_fail(void)
ksft_cnt.ksft_xfail + \
ksft_cnt.ksft_xskip)
-static inline __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
+static inline __noreturn __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
{
int saved_errno = errno;
va_list args;
@@ -347,19 +353,19 @@ static inline __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
exit(KSFT_FAIL);
}
-static inline int ksft_exit_xfail(void)
+static inline __noreturn int ksft_exit_xfail(void)
{
ksft_print_cnts();
exit(KSFT_XFAIL);
}
-static inline int ksft_exit_xpass(void)
+static inline __noreturn int ksft_exit_xpass(void)
{
ksft_print_cnts();
exit(KSFT_XPASS);
}
-static inline __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
+static inline __noreturn __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
{
int saved_errno = errno;
va_list args;
@@ -388,4 +394,21 @@ static inline __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
exit(KSFT_SKIP);
}
+static inline int ksft_min_kernel_version(unsigned int min_major,
+ unsigned int min_minor)
+{
+#ifdef NOLIBC
+ ksft_print_msg("NOLIBC: Can't check kernel version: Function not implemented\n");
+ return 0;
+#else
+ unsigned int major, minor;
+ struct utsname info;
+
+ if (uname(&info) || sscanf(info.release, "%u.%u.", &major, &minor) != 2)
+ ksft_exit_fail_msg("Can't parse kernel version\n");
+
+ return major > min_major || (major == min_major && minor >= min_minor);
+#endif
+}
+
#endif /* __KSELFTEST_H */
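ksft_min_kernel_version() parses the running kernel's major.minor from uname(2) so tests can gate on features by release. A usage sketch; the 6.8 threshold here is an arbitrary placeholder, not taken from any test in this series:

    #include "../kselftest.h"

    int main(void)
    {
    	ksft_print_header();
    	ksft_set_plan(1);

    	/* Skip, rather than fail, on kernels too old for the feature. */
    	if (!ksft_min_kernel_version(6, 8))
    		ksft_exit_skip("requires kernel >= 6.8\n");

    	ksft_test_result_pass("feature available\n");
    	ksft_finished();
    }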
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 4fd735e48ee7..d98702b6955d 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -56,7 +56,6 @@
#include <asm/types.h>
#include <ctype.h>
#include <errno.h>
-#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
@@ -383,6 +382,7 @@
FIXTURE_DATA(fixture_name) self; \
pid_t child = 1; \
int status = 0; \
+ bool jmp = false; \
memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \
if (setjmp(_metadata->env) == 0) { \
/* Use the same _metadata. */ \
@@ -399,8 +399,10 @@
_metadata->exit_code = KSFT_FAIL; \
} \
} \
+ else \
+ jmp = true; \
if (child == 0) { \
- if (_metadata->setup_completed && !_metadata->teardown_parent) \
+ if (_metadata->setup_completed && !_metadata->teardown_parent && !jmp) \
fixture_name##_teardown(_metadata, &self, variant->data); \
_exit(0); \
} \
@@ -1156,7 +1158,7 @@ void __run_test(struct __fixture_metadata *f,
struct __test_metadata *t)
{
struct __test_xfail *xfail;
- char test_name[LINE_MAX];
+ char *test_name;
const char *diagnostic;
/* reset test struct */
@@ -1164,8 +1166,12 @@ void __run_test(struct __fixture_metadata *f,
t->trigger = 0;
memset(t->results->reason, 0, sizeof(t->results->reason));
- snprintf(test_name, sizeof(test_name), "%s%s%s.%s",
- f->name, variant->name[0] ? "." : "", variant->name, t->name);
+ if (asprintf(&test_name, "%s%s%s.%s", f->name,
+ variant->name[0] ? "." : "", variant->name, t->name) == -1) {
+ ksft_print_msg("ERROR ALLOCATING MEMORY\n");
+ t->exit_code = KSFT_FAIL;
+ _exit(t->exit_code);
+ }
ksft_print_msg(" RUN %s ...\n", test_name);
@@ -1202,7 +1208,8 @@ void __run_test(struct __fixture_metadata *f,
diagnostic = "unknown";
ksft_test_result_code(t->exit_code, test_name,
- diagnostic ? "%s" : "", diagnostic);
+ diagnostic ? "%s" : NULL, diagnostic);
+ free(test_name);
}
static int test_harness_run(int argc, char **argv)
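Replacing the LINE_MAX stack buffer with asprintf() drops the limits.h dependency and the silent truncation of long fixture.variant.test names, in exchange for an explicit free() when done. The glibc idiom in isolation (asprintf() needs _GNU_SOURCE):

    #define _GNU_SOURCE	/* for asprintf() on glibc */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
    	char *name;

    	/* Allocates exactly as much as the formatted result needs. */
    	if (asprintf(&name, "%s.%s.%s", "fixture", "variant", "test") == -1)
    		return 1;	/* on failure, name's contents are undefined */

    	puts(name);
    	free(name);	/* caller owns the allocation */
    	return 0;
    }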
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
index ddba2c2fb5de..4eaba83cdcf3 100644
--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
+++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -135,8 +135,8 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
irq_iter = READ_ONCE(shared_data->nr_iter);
__GUEST_ASSERT(config_iter + 1 == irq_iter,
- "config_iter + 1 = 0x%lx, irq_iter = 0x%lx.\n"
- " Guest timer interrupt was not trigged within the specified\n"
+ "config_iter + 1 = 0x%x, irq_iter = 0x%x.\n"
+ " Guest timer interrupt was not triggered within the specified\n"
" interval, try to increase the error margin by [-e] option.\n",
config_iter + 1, irq_iter);
}
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 3bd03b088dda..81ce37ec407d 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -1037,8 +1037,19 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
struct kvm_x86_cpu_property property,
uint32_t value);
+void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);
void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
+
+static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
+ struct kvm_x86_cpu_feature feature)
+{
+ struct kvm_cpuid_entry2 *entry;
+
+ entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
+ return *((&entry->eax) + feature.reg) & BIT(feature.bit);
+}
+
void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
struct kvm_x86_cpu_feature feature,
bool set);
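vcpu_cpuid_has() relies on kvm_cpuid_entry2 storing eax, ebx, ecx and edx as consecutive __u32 fields, so "(&entry->eax) + feature.reg" selects a register by ordinal. A reduced sketch of that indexing trick with stand-in types (the real kvm_x86_cpu_feature also encodes the CPUID function and index):

    #include <stdint.h>
    #include <stdio.h>

    struct cpuid_regs {
    	uint32_t eax, ebx, ecx, edx;	/* contiguous, in register order */
    };

    enum reg { REG_EAX, REG_EBX, REG_ECX, REG_EDX };

    /* Index a register by ordinal; relies on the fields being contiguous,
     * the same layout assumption the selftest helper makes. */
    static int has_bit(const struct cpuid_regs *r, enum reg reg, unsigned int bit)
    {
    	return (*(&r->eax + reg) >> bit) & 1;
    }

    int main(void)
    {
    	struct cpuid_regs r = { .ecx = 1u << 5 };	/* e.g. CPUID.1:ECX[5] */

    	printf("bit set: %d\n", has_bit(&r, REG_ECX, 5));
    	return 0;
    }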
diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index 6628dc4dda89..1a6da7389bf1 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -22,10 +22,11 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
uint64_t gpa;
- for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
- *((volatile uint64_t *)gpa) = gpa;
-
- GUEST_DONE();
+ for (;;) {
+ for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
+ *((volatile uint64_t *)gpa) = gpa;
+ GUEST_SYNC(0);
+ }
}
struct vcpu_info {
@@ -55,7 +56,7 @@ static void rendezvous_with_boss(void)
static void run_vcpu(struct kvm_vcpu *vcpu)
{
vcpu_run(vcpu);
- TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
}
static void *vcpu_worker(void *data)
@@ -64,17 +65,13 @@ static void *vcpu_worker(void *data)
struct kvm_vcpu *vcpu = info->vcpu;
struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
- struct kvm_regs regs;
vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
- /* Snapshot regs before the first run. */
- vcpu_regs_get(vcpu, &regs);
rendezvous_with_boss();
run_vcpu(vcpu);
rendezvous_with_boss();
- vcpu_regs_set(vcpu, &regs);
vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
/* Toggle CR0.WP to trigger a MMU context reset. */
diff --git a/tools/testing/selftests/kvm/riscv/arch_timer.c b/tools/testing/selftests/kvm/riscv/arch_timer.c
index e22848f747c0..0f9cabd99fd4 100644
--- a/tools/testing/selftests/kvm/riscv/arch_timer.c
+++ b/tools/testing/selftests/kvm/riscv/arch_timer.c
@@ -60,7 +60,7 @@ static void guest_run(struct test_vcpu_shared_data *shared_data)
irq_iter = READ_ONCE(shared_data->nr_iter);
__GUEST_ASSERT(config_iter + 1 == irq_iter,
"config_iter + 1 = 0x%x, irq_iter = 0x%x.\n"
- " Guest timer interrupt was not trigged within the specified\n"
+ " Guest timer interrupt was not triggered within the specified\n"
" interval, try to increase the error margin by [-e] option.\n",
config_iter + 1, irq_iter);
}
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 06b43ed23580..bd57d991e27d 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -333,7 +333,7 @@ static void test_invalid_memory_region_flags(void)
struct kvm_vm *vm;
int r, i;
-#if defined __aarch64__ || defined __x86_64__
+#if defined __aarch64__ || defined __riscv || defined __x86_64__
supported_flags |= KVM_MEM_READONLY;
#endif
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
index 9e2879af7c20..40cc59f4e650 100644
--- a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
+++ b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
@@ -133,6 +133,43 @@ static void enter_guest(struct kvm_vcpu *vcpu)
}
}
+static void test_pv_unhalt(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct kvm_cpuid_entry2 *ent;
+ u32 kvm_sig_old;
+
+ pr_info("testing KVM_FEATURE_PV_UNHALT\n");
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_DISABLE_EXITS));
+
+ /* KVM_PV_UNHALT test */
+ vm = vm_create_with_one_vcpu(&vcpu, guest_main);
+ vcpu_set_cpuid_feature(vcpu, X86_FEATURE_KVM_PV_UNHALT);
+
+ TEST_ASSERT(vcpu_cpuid_has(vcpu, X86_FEATURE_KVM_PV_UNHALT),
+ "Enabling X86_FEATURE_KVM_PV_UNHALT had no effect");
+
+ /* Make sure KVM clears vcpu->arch.kvm_cpuid */
+ ent = vcpu_get_cpuid_entry(vcpu, KVM_CPUID_SIGNATURE);
+ kvm_sig_old = ent->ebx;
+ ent->ebx = 0xdeadbeef;
+ vcpu_set_cpuid(vcpu);
+
+ vm_enable_cap(vm, KVM_CAP_X86_DISABLE_EXITS, KVM_X86_DISABLE_EXITS_HLT);
+ ent = vcpu_get_cpuid_entry(vcpu, KVM_CPUID_SIGNATURE);
+ ent->ebx = kvm_sig_old;
+ vcpu_set_cpuid(vcpu);
+
+ TEST_ASSERT(!vcpu_cpuid_has(vcpu, X86_FEATURE_KVM_PV_UNHALT),
+ "KVM_FEATURE_PV_UNHALT is set with KVM_CAP_X86_DISABLE_EXITS");
+
+ /* FIXME: actually test KVM_FEATURE_PV_UNHALT feature */
+
+ kvm_vm_free(vm);
+}
+
int main(void)
{
struct kvm_vcpu *vcpu;
@@ -151,4 +188,6 @@ int main(void)
enter_guest(vcpu);
kvm_vm_free(vm);
+
+ test_pv_unhalt();
}
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index 29609b52f8fa..26c85815f7e9 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -416,12 +416,30 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
static void guest_test_gp_counters(void)
{
+ uint8_t pmu_version = guest_get_pmu_version();
uint8_t nr_gp_counters = 0;
uint32_t base_msr;
- if (guest_get_pmu_version())
+ if (pmu_version)
nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+ /*
+ * For v2+ PMUs, PERF_GLOBAL_CTRL's architectural post-RESET value is
+ * "Sets bits n-1:0 and clears the upper bits", where 'n' is the number
+ * of GP counters. If there are no GP counters, require KVM to leave
+ * PERF_GLOBAL_CTRL '0'. This edge case isn't covered by the SDM, but
+ * follow the spirit of the architecture and only globally enable GP
+ * counters, of which there are none.
+ */
+ if (pmu_version > 1) {
+ uint64_t global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL);
+
+ if (nr_gp_counters)
+ GUEST_ASSERT_EQ(global_ctrl, GENMASK_ULL(nr_gp_counters - 1, 0));
+ else
+ GUEST_ASSERT_EQ(global_ctrl, 0);
+ }
+
if (this_cpu_has(X86_FEATURE_PDCM) &&
rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
base_msr = MSR_IA32_PMC0;
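The new assertion encodes the architectural post-RESET value: with n GP counters, PERF_GLOBAL_CTRL comes up with bits n-1:0 set. GENMASK_ULL(h, l) builds exactly that mask; the sketch below uses a simplified definition that matches the kernel's for valid h >= l:

    #include <stdint.h>
    #include <stdio.h>

    /* Bits h..l set, everything else clear (simplified kernel-style form). */
    #define GENMASK_ULL(h, l) \
    	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
    	/* 8 GP counters => expected reset value has bits 7:0 set (0xff). */
    	unsigned int nr_gp_counters = 8;
    	uint64_t expected = GENMASK_ULL(nr_gp_counters - 1, 0);

    	printf("expected PERF_GLOBAL_CTRL: %#llx\n",
    	       (unsigned long long)expected);
    	return 0;
    }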
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index 7f6f5f23fb9b..977948fd52e6 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -28,16 +28,16 @@
#define NESTED_TEST_MEM1 0xc0001000
#define NESTED_TEST_MEM2 0xc0002000
-static void l2_guest_code(void)
+static void l2_guest_code(u64 *a, u64 *b)
{
- *(volatile uint64_t *)NESTED_TEST_MEM1;
- *(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
+ READ_ONCE(*a);
+ WRITE_ONCE(*a, 1);
GUEST_SYNC(true);
GUEST_SYNC(false);
- *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+ WRITE_ONCE(*b, 1);
GUEST_SYNC(true);
- *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+ WRITE_ONCE(*b, 1);
GUEST_SYNC(true);
GUEST_SYNC(false);
@@ -45,17 +45,33 @@ static void l2_guest_code(void)
vmcall();
}
+static void l2_guest_code_ept_enabled(void)
+{
+ l2_guest_code((u64 *)NESTED_TEST_MEM1, (u64 *)NESTED_TEST_MEM2);
+}
+
+static void l2_guest_code_ept_disabled(void)
+{
+ /* Access the same L1 GPAs as l2_guest_code_ept_enabled() */
+ l2_guest_code((u64 *)GUEST_TEST_MEM, (u64 *)GUEST_TEST_MEM);
+}
+
void l1_guest_code(struct vmx_pages *vmx)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ void *l2_rip;
GUEST_ASSERT(vmx->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx));
GUEST_ASSERT(load_vmcs(vmx));
- prepare_vmcs(vmx, l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ if (vmx->eptp_gpa)
+ l2_rip = l2_guest_code_ept_enabled;
+ else
+ l2_rip = l2_guest_code_ept_disabled;
+
+ prepare_vmcs(vmx, l2_rip, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_SYNC(false);
GUEST_ASSERT(!vmlaunch());
@@ -64,7 +80,7 @@ void l1_guest_code(struct vmx_pages *vmx)
GUEST_DONE();
}
-int main(int argc, char *argv[])
+static void test_vmx_dirty_log(bool enable_ept)
{
vm_vaddr_t vmx_pages_gva = 0;
struct vmx_pages *vmx;
@@ -76,8 +92,7 @@ int main(int argc, char *argv[])
struct ucall uc;
bool done = false;
- TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
- TEST_REQUIRE(kvm_cpu_has_ept());
+ pr_info("Nested EPT: %s\n", enable_ept ? "enabled" : "disabled");
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
@@ -103,11 +118,16 @@ int main(int argc, char *argv[])
*
* Note that prepare_eptp should be called only after L1's GPA map is done,
* meaning after the last call to virt_map.
+ *
+ * When EPT is disabled, the L2 guest code will still access the same L1
+ * GPAs as the EPT enabled case.
*/
- prepare_eptp(vmx, vm, 0);
- nested_map_memslot(vmx, vm, 0);
- nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
- nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
+ if (enable_ept) {
+ prepare_eptp(vmx, vm, 0);
+ nested_map_memslot(vmx, vm, 0);
+ nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
+ nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
+ }
bmap = bitmap_zalloc(TEST_MEM_PAGES);
host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
@@ -148,3 +168,15 @@ int main(int argc, char *argv[])
}
}
}
+
+int main(int argc, char *argv[])
+{
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+
+ test_vmx_dirty_log(/*enable_ept=*/false);
+
+ if (kvm_cpu_has_ept())
+ test_vmx_dirty_log(/*enable_ept=*/true);
+
+ return 0;
+}
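Parameterizing l2_guest_code() lets one body serve both runs: with EPT the pointers are the nested GPAs, without EPT both alias GUEST_TEST_MEM, and READ_ONCE()/WRITE_ONCE() keep the accesses from being merged or elided either way. In the tools headers these reduce to volatile accesses; a simplified rendering of the idea:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified forms of the tools/ macros: force one real memory access. */
    #define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    static void touch(uint64_t *a, uint64_t *b)
    {
    	READ_ONCE(*a);		/* a read the dirty-log test can observe */
    	WRITE_ONCE(*a, 1);	/* a write that dirties the page */
    	WRITE_ONCE(*b, 1);
    }

    int main(void)
    {
    	uint64_t mem = 0;

    	/* In the "EPT disabled" flavor both pointers alias one page. */
    	touch(&mem, &mem);
    	printf("mem = %llu\n", (unsigned long long)mem);
    	return 0;
    }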
diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
index cbe99594d319..18a49c70d4c6 100644
--- a/tools/testing/selftests/mm/gup_test.c
+++ b/tools/testing/selftests/mm/gup_test.c
@@ -203,7 +203,7 @@ int main(int argc, char **argv)
ksft_print_header();
ksft_set_plan(nthreads);
- filed = open(file, O_RDWR|O_CREAT);
+ filed = open(file, O_RDWR|O_CREAT, 0664);
if (filed < 0)
ksft_exit_fail_msg("Unable to open %s: %s\n", file, strerror(errno));
diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c
index 200bedcdc32e..1e01d3ddc11c 100644
--- a/tools/testing/selftests/mm/mdwe_test.c
+++ b/tools/testing/selftests/mm/mdwe_test.c
@@ -7,6 +7,7 @@
#include <linux/mman.h>
#include <linux/prctl.h>
+#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/auxv.h>
diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c
index f822ae31af22..48dc151f8fca 100644
--- a/tools/testing/selftests/mm/protection_keys.c
+++ b/tools/testing/selftests/mm/protection_keys.c
@@ -54,7 +54,6 @@ int test_nr;
u64 shadow_pkey_reg;
int dprint_in_signal;
char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE];
-char buf[256];
void cat_into_file(char *str, char *file)
{
@@ -1745,38 +1744,6 @@ void pkey_setup_shadow(void)
shadow_pkey_reg = __read_pkey_reg();
}
-void restore_settings_atexit(void)
-{
- cat_into_file(buf, "/proc/sys/vm/nr_hugepages");
-}
-
-void save_settings(void)
-{
- int fd;
- int err;
-
- if (geteuid())
- return;
-
- fd = open("/proc/sys/vm/nr_hugepages", O_RDONLY);
- if (fd < 0) {
- fprintf(stderr, "error opening\n");
- perror("error: ");
- exit(__LINE__);
- }
-
- /* -1 to guarantee leaving the trailing \0 */
- err = read(fd, buf, sizeof(buf)-1);
- if (err < 0) {
- fprintf(stderr, "error reading\n");
- perror("error: ");
- exit(__LINE__);
- }
-
- atexit(restore_settings_atexit);
- close(fd);
-}
-
int main(void)
{
int nr_iterations = 22;
@@ -1784,7 +1751,6 @@ int main(void)
srand((unsigned int)time(NULL));
- save_settings();
setup_handlers();
printf("has pkeys: %d\n", pkeys_supported);
diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
index c2c542fe7b17..4bdb3a0c7a60 100755
--- a/tools/testing/selftests/mm/run_vmtests.sh
+++ b/tools/testing/selftests/mm/run_vmtests.sh
@@ -385,6 +385,7 @@ CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0
CATEGORY="ksm" run_test ./ksm_functional_tests
# protection_keys tests
+nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
if [ -x ./protection_keys_32 ]
then
CATEGORY="pkey" run_test ./protection_keys_32
@@ -394,6 +395,7 @@ if [ -x ./protection_keys_64 ]
then
CATEGORY="pkey" run_test ./protection_keys_64
fi
+echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
if [ -x ./soft-dirty ]
then
diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
index cc5f144430d4..7dbfa53d93a0 100644
--- a/tools/testing/selftests/mm/soft-dirty.c
+++ b/tools/testing/selftests/mm/soft-dirty.c
@@ -137,7 +137,7 @@ static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
if (!map)
ksft_exit_fail_msg("anon mmap failed\n");
} else {
- test_fd = open(fname, O_RDWR | O_CREAT);
+ test_fd = open(fname, O_RDWR | O_CREAT, 0664);
if (test_fd < 0) {
ksft_test_result_skip("Test %s open() file failed\n", __func__);
return;
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index 856662d2f87a..d3c7f5fb3e7b 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -223,7 +223,7 @@ void split_file_backed_thp(void)
ksft_exit_fail_msg("Fail to create file-backed THP split testing file\n");
}
- fd = open(testfile, O_CREAT|O_WRONLY);
+ fd = open(testfile, O_CREAT|O_WRONLY, 0664);
if (fd == -1) {
ksft_perror("Cannot open testing file");
goto cleanup;
@@ -300,7 +300,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
char **addr)
{
size_t i;
- int dummy;
+ int __attribute__((unused)) dummy = 0;
srand(time(NULL));
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index b0ac0ec2356d..7ad6ba660c7d 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -18,6 +18,7 @@ bool test_uffdio_wp = true;
unsigned long long *count_verify;
uffd_test_ops_t *uffd_test_ops;
uffd_test_case_ops_t *uffd_test_case_ops;
+atomic_bool ready_for_fork;
static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
{
@@ -518,6 +519,8 @@ void *uffd_poll_thread(void *arg)
pollfd[1].fd = pipefd[cpu*2];
pollfd[1].events = POLLIN;
+ ready_for_fork = true;
+
for (;;) {
ret = poll(pollfd, 2, -1);
if (ret <= 0) {
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index cb055282c89c..cc5629c3d2aa 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -32,6 +32,7 @@
#include <inttypes.h>
#include <stdint.h>
#include <sys/random.h>
+#include <stdatomic.h>
#include "../kselftest.h"
#include "vm_util.h"
@@ -103,6 +104,7 @@ extern bool map_shared;
extern bool test_uffdio_wp;
extern unsigned long long *count_verify;
extern volatile bool test_uffdio_copy_eexist;
+extern atomic_bool ready_for_fork;
extern uffd_test_ops_t anon_uffd_test_ops;
extern uffd_test_ops_t shmem_uffd_test_ops;
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 2b9f8cc52639..21ec23206ab4 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -775,6 +775,8 @@ static void uffd_sigbus_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
+ ready_for_fork = false;
+
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
@@ -790,6 +792,9 @@ static void uffd_sigbus_test_common(bool wp)
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
+ while (!ready_for_fork)
+ ; /* Wait for the poll_thread to start executing before forking */
+
pid = fork();
if (pid < 0)
err("fork");
@@ -829,6 +834,8 @@ static void uffd_events_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
+ ready_for_fork = false;
+
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
true, wp, false))
@@ -838,6 +845,9 @@ static void uffd_events_test_common(bool wp)
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
+ while (!ready_for_fork)
+ ; /* Wait for the poll_thread to start executing before forking */
+
pid = fork();
if (pid < 0)
err("fork");
@@ -1427,7 +1437,8 @@ uffd_test_case_t uffd_tests[] = {
.uffd_fn = uffd_sigbus_wp_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_SIGBUS |
- UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP,
+ UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP |
+ UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
},
{
.name = "events",
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index c02990bbd56f..9007c420d52c 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -3,7 +3,7 @@
#include <stdbool.h>
#include <sys/mman.h>
#include <err.h>
-#include <string.h> /* ffsl() */
+#include <strings.h> /* ffsl() */
#include <unistd.h> /* _SC_PAGESIZE */
#define BIT_ULL(nr) (1ULL << (nr))
diff --git a/tools/testing/selftests/net/bind_wildcard.c b/tools/testing/selftests/net/bind_wildcard.c
index a2662348cdb1..b7b54d646b93 100644
--- a/tools/testing/selftests/net/bind_wildcard.c
+++ b/tools/testing/selftests/net/bind_wildcard.c
@@ -6,7 +6,9 @@
#include "../kselftest_harness.h"
-struct in6_addr in6addr_v4mapped_any = {
+static const __u32 in4addr_any = INADDR_ANY;
+static const __u32 in4addr_loopback = INADDR_LOOPBACK;
+static const struct in6_addr in6addr_v4mapped_any = {
.s6_addr = {
0, 0, 0, 0,
0, 0, 0, 0,
@@ -14,8 +16,7 @@ struct in6_addr in6addr_v4mapped_any = {
0, 0, 0, 0
}
};
-
-struct in6_addr in6addr_v4mapped_loopback = {
+static const struct in6_addr in6addr_v4mapped_loopback = {
.s6_addr = {
0, 0, 0, 0,
0, 0, 0, 0,
@@ -24,137 +25,785 @@ struct in6_addr in6addr_v4mapped_loopback = {
}
};
+#define NR_SOCKETS 8
+
FIXTURE(bind_wildcard)
{
- struct sockaddr_in addr4;
- struct sockaddr_in6 addr6;
+ int fd[NR_SOCKETS];
+ socklen_t addrlen[NR_SOCKETS];
+ union {
+ struct sockaddr addr;
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+ } addr[NR_SOCKETS];
};
FIXTURE_VARIANT(bind_wildcard)
{
- const __u32 addr4_const;
- const struct in6_addr *addr6_const;
- int expected_errno;
+ sa_family_t family[2];
+ const void *addr[2];
+ bool ipv6_only[2];
+
+ /* After the first two bind() calls for the two addresses defined above,
+ * 6 more bind() calls follow for these fixed addresses, in this order:
+ *
+ * 0.0.0.0
+ * 127.0.0.1
+ * ::
+ * ::1
+ * ::ffff:0.0.0.0
+ * ::ffff:127.0.0.1
+ */
+ int expected_errno[NR_SOCKETS];
+ int expected_reuse_errno[NR_SOCKETS];
+};
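Each variant now describes a full eight-socket matrix: the first two bind() calls use .family/.addr (optionally with IPV6_V6ONLY), the remaining six use the fixed addresses in the comment above, and the whole sequence is checked once against expected_errno and once, with SO_REUSEADDR, against expected_reuse_errno. The fixture code that consumes the table is outside this hunk; below is only a hedged sketch of the core step, with an illustrative helper:

    #include <arpa/inet.h>
    #include <errno.h>
    #include <netinet/in.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Illustrative: bind family/addr on a port, return 0 or the errno.
     * The fd is intentionally kept open so later binds can conflict. */
    static int try_bind(sa_family_t family, const void *addr, uint16_t port)
    {
    	struct sockaddr_storage ss = {0};
    	socklen_t len;
    	int fd;

    	if (family == AF_INET) {
    		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;

    		sin->sin_family = AF_INET;
    		sin->sin_port = htons(port);
    		sin->sin_addr.s_addr = htonl(*(const uint32_t *)addr);
    		len = sizeof(*sin);
    	} else {
    		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;

    		sin6->sin6_family = AF_INET6;
    		sin6->sin6_port = htons(port);
    		sin6->sin6_addr = *(const struct in6_addr *)addr;
    		len = sizeof(*sin6);
    	}

    	fd = socket(family, SOCK_STREAM, 0);
    	if (fd < 0)
    		return errno;
    	return bind(fd, (struct sockaddr *)&ss, len) ? errno : 0;
    }

    int main(void)
    {
    	uint32_t any = INADDR_ANY, lo = INADDR_LOOPBACK;

    	/* Mirrors the first two rows of the v4_any_v4_local variant. */
    	printf("bind 0.0.0.0:   %s\n", strerror(try_bind(AF_INET, &any, 54321)));
    	printf("bind 127.0.0.1: %s\n", strerror(try_bind(AF_INET, &lo, 54321)));
    	return 0;
    }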
+
+/* (IPv4, IPv4) */
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v4_local)
+{
+ .family = {AF_INET, AF_INET},
+ .addr = {&in4addr_any, &in4addr_loopback},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v4_any)
+{
+ .family = {AF_INET, AF_INET},
+ .addr = {&in4addr_loopback, &in4addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
};
+/* (IPv4, IPv6) */
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any)
{
- .addr4_const = INADDR_ANY,
- .addr6_const = &in6addr_any,
- .expected_errno = EADDRINUSE,
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_any, &in6addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any_only)
+{
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_any, &in6addr_any},
+ .ipv6_only = {false, true},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local)
{
- .addr4_const = INADDR_ANY,
- .addr6_const = &in6addr_loopback,
- .expected_errno = 0,
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_any, &in6addr_loopback},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_any)
{
- .addr4_const = INADDR_ANY,
- .addr6_const = &in6addr_v4mapped_any,
- .expected_errno = EADDRINUSE,
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_any, &in6addr_v4mapped_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_local)
{
- .addr4_const = INADDR_ANY,
- .addr6_const = &in6addr_v4mapped_loopback,
- .expected_errno = EADDRINUSE,
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_any, &in6addr_v4mapped_loopback},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any)
{
- .addr4_const = INADDR_LOOPBACK,
- .addr6_const = &in6addr_any,
- .expected_errno = EADDRINUSE,
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_loopback, &in6addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any_only)
+{
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_loopback, &in6addr_any},
+ .ipv6_only = {false, true},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local)
{
- .addr4_const = INADDR_LOOPBACK,
- .addr6_const = &in6addr_loopback,
- .expected_errno = 0,
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_loopback, &in6addr_loopback},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_any)
{
- .addr4_const = INADDR_LOOPBACK,
- .addr6_const = &in6addr_v4mapped_any,
- .expected_errno = EADDRINUSE,
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_loopback, &in6addr_v4mapped_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
};
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_local)
{
- .addr4_const = INADDR_LOOPBACK,
- .addr6_const = &in6addr_v4mapped_loopback,
- .expected_errno = EADDRINUSE,
+ .family = {AF_INET, AF_INET6},
+ .addr = {&in4addr_loopback, &in6addr_v4mapped_loopback},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+};
+
+/* (IPv6, IPv4) */
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v4_any)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_any, &in4addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
};
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v4_any)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_any, &in4addr_any},
+ .ipv6_only = {true, false},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v4_local)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_any, &in4addr_loopback},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v4_local)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_any, &in4addr_loopback},
+ .ipv6_only = {true, false},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v4_any)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_loopback, &in4addr_any},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v4_local)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_loopback, &in4addr_loopback},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v4_any)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_v4mapped_any, &in4addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v4_local)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_v4mapped_any, &in4addr_loopback},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_local_v4_any)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_v4mapped_loopback, &in4addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_local_v4_local)
+{
+ .family = {AF_INET6, AF_INET},
+ .addr = {&in6addr_v4mapped_loopback, &in4addr_loopback},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+};
+
+/* (IPv6, IPv6) */
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_any)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_any)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_any},
+ .ipv6_only = {true, false},
+ .expected_errno = {0, EADDRINUSE,
+ 0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_any_only)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_any},
+ .ipv6_only = {false, true},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_any_only)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_any},
+ .ipv6_only = {true, true},
+ .expected_errno = {0, EADDRINUSE,
+ 0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ 0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_local)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_loopback},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_local)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_loopback},
+ .ipv6_only = {true, false},
+ .expected_errno = {0, EADDRINUSE,
+ 0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ 0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_v4mapped_any)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_v4mapped_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_v4mapped_any)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_v4mapped_any},
+ .ipv6_only = {true, false},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_v4mapped_local)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_v4mapped_loopback},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_v4mapped_local)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_any, &in6addr_v4mapped_loopback},
+ .ipv6_only = {true, false},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v6_any)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_loopback, &in6addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ 0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v6_any_only)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_loopback, &in6addr_any},
+ .ipv6_only = {false, true},
+ .expected_errno = {0, EADDRINUSE,
+ 0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ 0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v6_v4mapped_any)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_loopback, &in6addr_v4mapped_any},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v6_v4mapped_local)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_loopback, &in6addr_v4mapped_loopback},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v6_any)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_v4mapped_any, &in6addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v6_any_only)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_v4mapped_any, &in6addr_any},
+ .ipv6_only = {false, true},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v6_local)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_v4mapped_any, &in6addr_loopback},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v6_v4mapped_local)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_v4mapped_any, &in6addr_v4mapped_loopback},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_loopback_v6_any)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_v4mapped_loopback, &in6addr_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_loopback_v6_any_only)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_v4mapped_loopback, &in6addr_any},
+ .ipv6_only = {false, true},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_loopback_v6_local)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_v4mapped_loopback, &in6addr_loopback},
+ .expected_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_loopback_v6_v4mapped_any)
+{
+ .family = {AF_INET6, AF_INET6},
+ .addr = {&in6addr_v4mapped_loopback, &in6addr_v4mapped_any},
+ .expected_errno = {0, EADDRINUSE,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+ .expected_reuse_errno = {0, 0,
+ EADDRINUSE, EADDRINUSE,
+ EADDRINUSE, 0,
+ EADDRINUSE, EADDRINUSE},
+};
+
+static void setup_addr(FIXTURE_DATA(bind_wildcard) *self, int i,
+ int family, const void *addr_const)
+{
+ if (family == AF_INET) {
+ struct sockaddr_in *addr4 = &self->addr[i].addr4;
+ const __u32 *addr4_const = addr_const;
+
+ addr4->sin_family = AF_INET;
+ addr4->sin_port = htons(0);
+ addr4->sin_addr.s_addr = htonl(*addr4_const);
+
+ self->addrlen[i] = sizeof(struct sockaddr_in);
+ } else {
+ struct sockaddr_in6 *addr6 = &self->addr[i].addr6;
+ const struct in6_addr *addr6_const = addr_const;
+
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_port = htons(0);
+ addr6->sin6_addr = *addr6_const;
+
+ self->addrlen[i] = sizeof(struct sockaddr_in6);
+ }
+}
+
FIXTURE_SETUP(bind_wildcard)
{
- self->addr4.sin_family = AF_INET;
- self->addr4.sin_port = htons(0);
- self->addr4.sin_addr.s_addr = htonl(variant->addr4_const);
+ setup_addr(self, 0, variant->family[0], variant->addr[0]);
+ setup_addr(self, 1, variant->family[1], variant->addr[1]);
+
+ setup_addr(self, 2, AF_INET, &in4addr_any);
+ setup_addr(self, 3, AF_INET, &in4addr_loopback);
- self->addr6.sin6_family = AF_INET6;
- self->addr6.sin6_port = htons(0);
- self->addr6.sin6_addr = *variant->addr6_const;
+ setup_addr(self, 4, AF_INET6, &in6addr_any);
+ setup_addr(self, 5, AF_INET6, &in6addr_loopback);
+ setup_addr(self, 6, AF_INET6, &in6addr_v4mapped_any);
+ setup_addr(self, 7, AF_INET6, &in6addr_v4mapped_loopback);
}
FIXTURE_TEARDOWN(bind_wildcard)
{
+ int i;
+
+ for (i = 0; i < NR_SOCKETS; i++)
+ close(self->fd[i]);
}
-void bind_sockets(struct __test_metadata *_metadata,
- FIXTURE_DATA(bind_wildcard) *self,
- int expected_errno,
- struct sockaddr *addr1, socklen_t addrlen1,
- struct sockaddr *addr2, socklen_t addrlen2)
+void bind_socket(struct __test_metadata *_metadata,
+ FIXTURE_DATA(bind_wildcard) *self,
+ const FIXTURE_VARIANT(bind_wildcard) *variant,
+ int i, int reuse)
{
- int fd[2];
int ret;
- fd[0] = socket(addr1->sa_family, SOCK_STREAM, 0);
- ASSERT_GT(fd[0], 0);
+ self->fd[i] = socket(self->addr[i].addr.sa_family, SOCK_STREAM, 0);
+ ASSERT_GT(self->fd[i], 0);
- ret = bind(fd[0], addr1, addrlen1);
- ASSERT_EQ(ret, 0);
+ if (i < 2 && variant->ipv6_only[i]) {
+ ret = setsockopt(self->fd[i], SOL_IPV6, IPV6_V6ONLY, &(int){1}, sizeof(int));
+ ASSERT_EQ(ret, 0);
+ }
- ret = getsockname(fd[0], addr1, &addrlen1);
- ASSERT_EQ(ret, 0);
+ if (i < 2 && reuse) {
+ ret = setsockopt(self->fd[i], SOL_SOCKET, reuse, &(int){1}, sizeof(int));
+ ASSERT_EQ(ret, 0);
+ }
- ((struct sockaddr_in *)addr2)->sin_port = ((struct sockaddr_in *)addr1)->sin_port;
+ self->addr[i].addr4.sin_port = self->addr[0].addr4.sin_port;
- fd[1] = socket(addr2->sa_family, SOCK_STREAM, 0);
- ASSERT_GT(fd[1], 0);
+ ret = bind(self->fd[i], &self->addr[i].addr, self->addrlen[i]);
- ret = bind(fd[1], addr2, addrlen2);
- if (expected_errno) {
- ASSERT_EQ(ret, -1);
- ASSERT_EQ(errno, expected_errno);
+ if (reuse) {
+ if (variant->expected_reuse_errno[i]) {
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, variant->expected_reuse_errno[i]);
+ } else {
+ ASSERT_EQ(ret, 0);
+ }
} else {
+ if (variant->expected_errno[i]) {
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, variant->expected_errno[i]);
+ } else {
+ ASSERT_EQ(ret, 0);
+ }
+ }
+
+ if (i == 0) {
+ ret = getsockname(self->fd[0], &self->addr[0].addr, &self->addrlen[0]);
ASSERT_EQ(ret, 0);
}
+}
- close(fd[1]);
- close(fd[0]);
+TEST_F(bind_wildcard, plain)
+{
+ int i;
+
+ for (i = 0; i < NR_SOCKETS; i++)
+ bind_socket(_metadata, self, variant, i, 0);
}
-TEST_F(bind_wildcard, v4_v6)
+TEST_F(bind_wildcard, reuseaddr)
{
- bind_sockets(_metadata, self, variant->expected_errno,
- (struct sockaddr *)&self->addr4, sizeof(self->addr4),
- (struct sockaddr *)&self->addr6, sizeof(self->addr6));
+ int i;
+
+ for (i = 0; i < NR_SOCKETS; i++)
+ bind_socket(_metadata, self, variant, i, SO_REUSEADDR);
}
-TEST_F(bind_wildcard, v6_v4)
+TEST_F(bind_wildcard, reuseport)
{
- bind_sockets(_metadata, self, variant->expected_errno,
- (struct sockaddr *)&self->addr6, sizeof(self->addr6),
- (struct sockaddr *)&self->addr4, sizeof(self->addr4));
+ int i;
+
+ for (i = 0; i < NR_SOCKETS; i++)
+ bind_socket(_metadata, self, variant, i, SO_REUSEPORT);
}
TEST_HARNESS_MAIN
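
The table-driven variants above all exercise one underlying rule: an AF_INET6 bind() to [::] without IPV6_V6ONLY also claims the IPv4 port, while setting the option first leaves it free. A minimal standalone sketch of the first conflict (error handling trimmed, port chosen by the kernel, not the selftest's actual harness):

    #include <errno.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
        struct sockaddr_in6 a6 = { .sin6_family = AF_INET6,
                                   .sin6_addr = in6addr_any };
        struct sockaddr_in a4 = { .sin_family = AF_INET,
                                  .sin_addr.s_addr = htonl(INADDR_ANY) };
        int s6 = socket(AF_INET6, SOCK_STREAM, 0);
        int s4 = socket(AF_INET, SOCK_STREAM, 0);
        socklen_t len = sizeof(a6);

        /* Dual-stack bind: [::]:port implicitly claims 0.0.0.0:port too. */
        bind(s6, (struct sockaddr *)&a6, sizeof(a6));
        getsockname(s6, (struct sockaddr *)&a6, &len);

        a4.sin_port = a6.sin6_port;
        if (bind(s4, (struct sockaddr *)&a4, sizeof(a4)) == -1)
            printf("v4 bind: %s\n", strerror(errno)); /* EADDRINUSE */
        return 0;
    }

Setting IPV6_V6ONLY on s6 before bind() corresponds to the `.ipv6_only = {true, ...}` variants, where the subsequent IPv4 wildcard bind (index 2 in the expected_errno tables) is expected to succeed.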
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 4c4248554826..4131f3263a48 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -383,12 +383,14 @@ do_transfer()
local stat_cookierx_last
local stat_csum_err_s
local stat_csum_err_c
+ local stat_tcpfb_last_l
stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+ stat_tcpfb_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
timeout ${timeout_test} \
ip netns exec ${listener_ns} \
@@ -457,11 +459,13 @@ do_transfer()
local stat_cookietx_now
local stat_cookierx_now
local stat_ooo_now
+ local stat_tcpfb_now_l
stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
+ stat_tcpfb_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
expect_synrx=$((stat_synrx_last_l))
expect_ackrx=$((stat_ackrx_last_l))
@@ -508,6 +512,11 @@ do_transfer()
fi
fi
+ if [ ${stat_ooo_now} -eq 0 ] && [ ${stat_tcpfb_last_l} -ne ${stat_tcpfb_now_l} ]; then
+ mptcp_lib_pr_fail "unexpected fallback to TCP"
+ rets=1
+ fi
+
if [ $cookies -eq 2 ];then
if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then
extra+=" WARN: CookieSent: did not advance"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 5e9211e89825..e4403236f655 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -729,7 +729,7 @@ pm_nl_check_endpoint()
[ -n "$_flags" ]; flags="flags $_flags"
shift
elif [ $1 = "dev" ]; then
- [ -n "$2" ]; dev="dev $1"
+ [ -n "$2" ]; dev="dev $2"
shift
elif [ $1 = "id" ]; then
_id=$2
@@ -3610,6 +3610,8 @@ endpoint_tests()
local tests_pid=$!
wait_mpj $ns2
+ pm_nl_check_endpoint "creation" \
+ $ns2 10.0.2.2 id 2 flags subflow dev ns2eth2
chk_subflow_nr "before delete" 2
chk_mptcp_info subflows 1 subflows 1
diff --git a/tools/testing/selftests/net/reuseaddr_conflict.c b/tools/testing/selftests/net/reuseaddr_conflict.c
index 7c5b12664b03..bfb07dc49518 100644
--- a/tools/testing/selftests/net/reuseaddr_conflict.c
+++ b/tools/testing/selftests/net/reuseaddr_conflict.c
@@ -109,6 +109,6 @@ int main(void)
fd1 = open_port(0, 1);
if (fd1 >= 0)
error(1, 0, "Was allowed to create an ipv4 reuseport on an already bound non-reuseport socket with no ipv6");
- fprintf(stderr, "Success");
+ fprintf(stderr, "Success\n");
return 0;
}
diff --git a/tools/testing/selftests/net/tcp_ao/lib/proc.c b/tools/testing/selftests/net/tcp_ao/lib/proc.c
index 2fb6dd8adba6..8b984fa04286 100644
--- a/tools/testing/selftests/net/tcp_ao/lib/proc.c
+++ b/tools/testing/selftests/net/tcp_ao/lib/proc.c
@@ -86,7 +86,7 @@ static void netstat_read_type(FILE *fnetstat, struct netstat **dest, char *line)
pos = strchr(line, ' ') + 1;
- if (fscanf(fnetstat, type->header_name) == EOF)
+ if (fscanf(fnetstat, "%[^ :]", type->header_name) == EOF)
test_error("fscanf(%s)", type->header_name);
if (fread(&tmp, 1, 1, fnetstat) != 1 || tmp != ':')
test_error("Unexpected netstat format (%c)", tmp);
diff --git a/tools/testing/selftests/net/tcp_ao/lib/setup.c b/tools/testing/selftests/net/tcp_ao/lib/setup.c
index 92276f916f2f..e408b9243b2c 100644
--- a/tools/testing/selftests/net/tcp_ao/lib/setup.c
+++ b/tools/testing/selftests/net/tcp_ao/lib/setup.c
@@ -17,37 +17,37 @@ static pthread_mutex_t ksft_print_lock = PTHREAD_MUTEX_INITIALIZER;
void __test_msg(const char *buf)
{
pthread_mutex_lock(&ksft_print_lock);
- ksft_print_msg(buf);
+ ksft_print_msg("%s", buf);
pthread_mutex_unlock(&ksft_print_lock);
}
void __test_ok(const char *buf)
{
pthread_mutex_lock(&ksft_print_lock);
- ksft_test_result_pass(buf);
+ ksft_test_result_pass("%s", buf);
pthread_mutex_unlock(&ksft_print_lock);
}
void __test_fail(const char *buf)
{
pthread_mutex_lock(&ksft_print_lock);
- ksft_test_result_fail(buf);
+ ksft_test_result_fail("%s", buf);
pthread_mutex_unlock(&ksft_print_lock);
}
void __test_xfail(const char *buf)
{
pthread_mutex_lock(&ksft_print_lock);
- ksft_test_result_xfail(buf);
+ ksft_test_result_xfail("%s", buf);
pthread_mutex_unlock(&ksft_print_lock);
}
void __test_error(const char *buf)
{
pthread_mutex_lock(&ksft_print_lock);
- ksft_test_result_error(buf);
+ ksft_test_result_error("%s", buf);
pthread_mutex_unlock(&ksft_print_lock);
}
void __test_skip(const char *buf)
{
pthread_mutex_lock(&ksft_print_lock);
- ksft_test_result_skip(buf);
+ ksft_test_result_skip("%s", buf);
pthread_mutex_unlock(&ksft_print_lock);
}
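
All six setup.c hunks apply the same rule to the printf-style ksft helpers: never pass caller-supplied text as the format argument. Compilers flag the unsafe form with -Wformat-security; the fix is a constant "%s" format, as in this sketch (log_msg is a made-up stand-in for the wrappers above):

    #include <stdio.h>

    static void log_msg(const char *buf)
    {
        /*
         * printf(buf) would let any '%' in buf trigger conversions that
         * read bogus varargs (or write memory, via %n).
         */
        printf("%s", buf);
    }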
diff --git a/tools/testing/selftests/net/tcp_ao/rst.c b/tools/testing/selftests/net/tcp_ao/rst.c
index 7df8b8700e39..a2fe88d35ac0 100644
--- a/tools/testing/selftests/net/tcp_ao/rst.c
+++ b/tools/testing/selftests/net/tcp_ao/rst.c
@@ -256,8 +256,6 @@ static int test_wait_fds(int sk[], size_t nr, bool is_writable[],
static void test_client_active_rst(unsigned int port)
{
- /* one in queue, another accept()ed */
- unsigned int wait_for = backlog + 2;
int i, sk[3], err;
bool is_writable[ARRAY_SIZE(sk)] = {false};
unsigned int last = ARRAY_SIZE(sk) - 1;
@@ -275,16 +273,20 @@ static void test_client_active_rst(unsigned int port)
for (i = 0; i < last; i++) {
err = _test_connect_socket(sk[i], this_ip_dest, port,
(i == 0) ? TEST_TIMEOUT_SEC : -1);
-
if (err < 0)
test_error("failed to connect()");
}
- synchronize_threads(); /* 2: connection accept()ed, another queued */
- err = test_wait_fds(sk, last, is_writable, wait_for, TEST_TIMEOUT_SEC);
+ synchronize_threads(); /* 2: two connections: one accept()ed, another queued */
+ err = test_wait_fds(sk, last, is_writable, last, TEST_TIMEOUT_SEC);
if (err < 0)
test_error("test_wait_fds(): %d", err);
+ /* async connect() with third sk to get into request_sock_queue */
+ err = _test_connect_socket(sk[last], this_ip_dest, port, -1);
+ if (err < 0)
+ test_error("failed to connect()");
+
synchronize_threads(); /* 3: close listen socket */
if (test_client_verify(sk[0], packet_sz, quota / packet_sz, TEST_TIMEOUT_SEC))
test_fail("Failed to send data on connected socket");
@@ -292,13 +294,14 @@ static void test_client_active_rst(unsigned int port)
test_ok("Verified established tcp connection");
synchronize_threads(); /* 4: finishing up */
- err = _test_connect_socket(sk[last], this_ip_dest, port, -1);
- if (err < 0)
- test_error("failed to connect()");
synchronize_threads(); /* 5: closed active sk */
- err = test_wait_fds(sk, ARRAY_SIZE(sk), NULL,
- wait_for, TEST_TIMEOUT_SEC);
+	/*
+	 * Wait for two connections: one accept()ed, another in the accept queue.
+	 * The one in the request_sock_queue won't get fully established, so it
+	 * doesn't receive an active RST; see inet_csk_listen_stop().
+	 */
+ err = test_wait_fds(sk, last, NULL, last, TEST_TIMEOUT_SEC);
if (err < 0)
test_error("select(): %d", err);
diff --git a/tools/testing/selftests/net/tcp_ao/setsockopt-closed.c b/tools/testing/selftests/net/tcp_ao/setsockopt-closed.c
index 452de131fa3a..517930f9721b 100644
--- a/tools/testing/selftests/net/tcp_ao/setsockopt-closed.c
+++ b/tools/testing/selftests/net/tcp_ao/setsockopt-closed.c
@@ -21,7 +21,7 @@ static void make_listen(int sk)
static void test_vefify_ao_info(int sk, struct tcp_ao_info_opt *info,
const char *tst)
{
- struct tcp_ao_info_opt tmp;
+ struct tcp_ao_info_opt tmp = {};
socklen_t len = sizeof(tmp);
if (getsockopt(sk, IPPROTO_TCP, TCP_AO_INFO, &tmp, &len))
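
Zero-initializing tmp matters because getsockopt() may write fewer bytes than sizeof(tmp) (it updates len accordingly), and the verification that follows compares whole structs; without the initializer the comparison reads indeterminate bytes. The papr_vpd.c hunk further down makes the same fix. A sketch with a made-up option struct:

    #include <string.h>
    #include <sys/socket.h>

    struct opt { unsigned int flags; unsigned int counters[4]; }; /* hypothetical */

    static int read_opt(int sk, int level, int name, struct opt *out)
    {
        struct opt tmp = {};	/* zero first: the kernel may fill less */
        socklen_t len = sizeof(tmp);

        if (getsockopt(sk, level, name, &tmp, &len))
            return -1;
        /* Any bytes past 'len' are now deterministically zero. */
        memcpy(out, &tmp, sizeof(tmp));
        return 0;
    }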
diff --git a/tools/testing/selftests/net/test_vxlan_mdb.sh b/tools/testing/selftests/net/test_vxlan_mdb.sh
index 74ff9fb2a6f0..58da5de99ac4 100755
--- a/tools/testing/selftests/net/test_vxlan_mdb.sh
+++ b/tools/testing/selftests/net/test_vxlan_mdb.sh
@@ -1177,6 +1177,7 @@ encap_params_common()
local plen=$1; shift
local enc_ethtype=$1; shift
local grp=$1; shift
+ local grp_dmac=$1; shift
local src=$1; shift
local mz=$1; shift
@@ -1195,11 +1196,11 @@ encap_params_common()
run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep2_ip src_vni 10020"
run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_dst_ip $vtep1_ip action pass"
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Destination IP - match"
- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Destination IP - no match"
@@ -1212,20 +1213,20 @@ encap_params_common()
run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip dst_port 1111 src_vni 10020"
run_cmd "tc -n $ns2 filter replace dev veth0 ingress pref 1 handle 101 proto $enc_ethtype flower ip_proto udp dst_port 4789 action pass"
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev veth0 ingress" 101 1
log_test $? 0 "Default destination port - match"
- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev veth0 ingress" 101 1
log_test $? 0 "Default destination port - no match"
run_cmd "tc -n $ns2 filter replace dev veth0 ingress pref 1 handle 101 proto $enc_ethtype flower ip_proto udp dst_port 1111 action pass"
- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev veth0 ingress" 101 1
log_test $? 0 "Non-default destination port - match"
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev veth0 ingress" 101 1
log_test $? 0 "Non-default destination port - no match"
@@ -1238,11 +1239,11 @@ encap_params_common()
run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip src_vni 10020"
run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_key_id 10010 action pass"
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Default destination VNI - match"
- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Default destination VNI - no match"
@@ -1250,11 +1251,11 @@ encap_params_common()
run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip vni 10010 src_vni 10020"
run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_key_id 10020 action pass"
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Non-default destination VNI - match"
- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Non-default destination VNI - no match"
@@ -1272,6 +1273,7 @@ encap_params_ipv4_ipv4()
local plen=32
local enc_ethtype="ip"
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local src=192.0.2.129
echo
@@ -1279,7 +1281,7 @@ encap_params_ipv4_ipv4()
echo "------------------------------------------------------------------"
encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \
- $grp $src "mausezahn"
+ $grp $grp_dmac $src "mausezahn"
}
encap_params_ipv6_ipv4()
@@ -1291,6 +1293,7 @@ encap_params_ipv6_ipv4()
local plen=32
local enc_ethtype="ip"
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local src=2001:db8:100::1
echo
@@ -1298,7 +1301,7 @@ encap_params_ipv6_ipv4()
echo "------------------------------------------------------------------"
encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \
- $grp $src "mausezahn -6"
+ $grp $grp_dmac $src "mausezahn -6"
}
encap_params_ipv4_ipv6()
@@ -1310,6 +1313,7 @@ encap_params_ipv4_ipv6()
local plen=128
local enc_ethtype="ipv6"
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local src=192.0.2.129
echo
@@ -1317,7 +1321,7 @@ encap_params_ipv4_ipv6()
echo "------------------------------------------------------------------"
encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \
- $grp $src "mausezahn"
+ $grp $grp_dmac $src "mausezahn"
}
encap_params_ipv6_ipv6()
@@ -1329,6 +1333,7 @@ encap_params_ipv6_ipv6()
local plen=128
local enc_ethtype="ipv6"
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local src=2001:db8:100::1
echo
@@ -1336,7 +1341,7 @@ encap_params_ipv6_ipv6()
echo "------------------------------------------------------------------"
encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \
- $grp $src "mausezahn -6"
+ $grp $grp_dmac $src "mausezahn -6"
}
starg_exclude_ir_common()
@@ -1347,6 +1352,7 @@ starg_exclude_ir_common()
local vtep2_ip=$1; shift
local plen=$1; shift
local grp=$1; shift
+ local grp_dmac=$1; shift
local valid_src=$1; shift
local invalid_src=$1; shift
local mz=$1; shift
@@ -1368,14 +1374,14 @@ starg_exclude_ir_common()
run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $invalid_src dst $vtep2_ip src_vni 10010"
# Check that invalid source is not forwarded to any VTEP.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 0
log_test $? 0 "Block excluded source - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 0
log_test $? 0 "Block excluded source - second VTEP"
# Check that valid source is forwarded to both VTEPs.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Forward valid source - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 1
@@ -1385,14 +1391,14 @@ starg_exclude_ir_common()
run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep2_ip src_vni 10010"
# Check that invalid source is not forwarded to any VTEP.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Block excluded source after removal - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 1
log_test $? 0 "Block excluded source after removal - second VTEP"
# Check that valid source is forwarded to the remaining VTEP.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 2
log_test $? 0 "Forward valid source after removal - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 1
@@ -1407,6 +1413,7 @@ starg_exclude_ir_ipv4_ipv4()
local vtep2_ip=198.51.100.200
local plen=32
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local valid_src=192.0.2.129
local invalid_src=192.0.2.145
@@ -1415,7 +1422,7 @@ starg_exclude_ir_ipv4_ipv4()
echo "-------------------------------------------------------------"
starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
- $valid_src $invalid_src "mausezahn"
+ $grp_dmac $valid_src $invalid_src "mausezahn"
}
starg_exclude_ir_ipv6_ipv4()
@@ -1426,6 +1433,7 @@ starg_exclude_ir_ipv6_ipv4()
local vtep2_ip=198.51.100.200
local plen=32
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local valid_src=2001:db8:100::1
local invalid_src=2001:db8:200::1
@@ -1434,7 +1442,7 @@ starg_exclude_ir_ipv6_ipv4()
echo "-------------------------------------------------------------"
starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
- $valid_src $invalid_src "mausezahn -6"
+ $grp_dmac $valid_src $invalid_src "mausezahn -6"
}
starg_exclude_ir_ipv4_ipv6()
@@ -1445,6 +1453,7 @@ starg_exclude_ir_ipv4_ipv6()
local vtep2_ip=2001:db8:2000::1
local plen=128
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local valid_src=192.0.2.129
local invalid_src=192.0.2.145
@@ -1453,7 +1462,7 @@ starg_exclude_ir_ipv4_ipv6()
echo "-------------------------------------------------------------"
starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
- $valid_src $invalid_src "mausezahn"
+ $grp_dmac $valid_src $invalid_src "mausezahn"
}
starg_exclude_ir_ipv6_ipv6()
@@ -1464,6 +1473,7 @@ starg_exclude_ir_ipv6_ipv6()
local vtep2_ip=2001:db8:2000::1
local plen=128
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local valid_src=2001:db8:100::1
local invalid_src=2001:db8:200::1
@@ -1472,7 +1482,7 @@ starg_exclude_ir_ipv6_ipv6()
echo "-------------------------------------------------------------"
starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
- $valid_src $invalid_src "mausezahn -6"
+ $grp_dmac $valid_src $invalid_src "mausezahn -6"
}
starg_include_ir_common()
@@ -1483,6 +1493,7 @@ starg_include_ir_common()
local vtep2_ip=$1; shift
local plen=$1; shift
local grp=$1; shift
+ local grp_dmac=$1; shift
local valid_src=$1; shift
local invalid_src=$1; shift
local mz=$1; shift
@@ -1504,14 +1515,14 @@ starg_include_ir_common()
run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode include source_list $valid_src dst $vtep2_ip src_vni 10010"
# Check that invalid source is not forwarded to any VTEP.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 0
log_test $? 0 "Block excluded source - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 0
log_test $? 0 "Block excluded source - second VTEP"
# Check that valid source is forwarded to both VTEPs.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Forward valid source - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 1
@@ -1521,14 +1532,14 @@ starg_include_ir_common()
run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep2_ip src_vni 10010"
# Check that invalid source is not forwarded to any VTEP.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Block excluded source after removal - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 1
log_test $? 0 "Block excluded source after removal - second VTEP"
# Check that valid source is forwarded to the remaining VTEP.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 2
log_test $? 0 "Forward valid source after removal - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 1
@@ -1543,6 +1554,7 @@ starg_include_ir_ipv4_ipv4()
local vtep2_ip=198.51.100.200
local plen=32
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local valid_src=192.0.2.129
local invalid_src=192.0.2.145
@@ -1551,7 +1563,7 @@ starg_include_ir_ipv4_ipv4()
echo "-------------------------------------------------------------"
starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
- $valid_src $invalid_src "mausezahn"
+ $grp_dmac $valid_src $invalid_src "mausezahn"
}
starg_include_ir_ipv6_ipv4()
@@ -1562,6 +1574,7 @@ starg_include_ir_ipv6_ipv4()
local vtep2_ip=198.51.100.200
local plen=32
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local valid_src=2001:db8:100::1
local invalid_src=2001:db8:200::1
@@ -1570,7 +1583,7 @@ starg_include_ir_ipv6_ipv4()
echo "-------------------------------------------------------------"
starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
- $valid_src $invalid_src "mausezahn -6"
+ $grp_dmac $valid_src $invalid_src "mausezahn -6"
}
starg_include_ir_ipv4_ipv6()
@@ -1581,6 +1594,7 @@ starg_include_ir_ipv4_ipv6()
local vtep2_ip=2001:db8:2000::1
local plen=128
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local valid_src=192.0.2.129
local invalid_src=192.0.2.145
@@ -1589,7 +1603,7 @@ starg_include_ir_ipv4_ipv6()
echo "-------------------------------------------------------------"
starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
- $valid_src $invalid_src "mausezahn"
+ $grp_dmac $valid_src $invalid_src "mausezahn"
}
starg_include_ir_ipv6_ipv6()
@@ -1600,6 +1614,7 @@ starg_include_ir_ipv6_ipv6()
local vtep2_ip=2001:db8:2000::1
local plen=128
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local valid_src=2001:db8:100::1
local invalid_src=2001:db8:200::1
@@ -1608,7 +1623,7 @@ starg_include_ir_ipv6_ipv6()
echo "-------------------------------------------------------------"
starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
- $valid_src $invalid_src "mausezahn -6"
+ $grp_dmac $valid_src $invalid_src "mausezahn -6"
}
starg_exclude_p2mp_common()
@@ -1618,6 +1633,7 @@ starg_exclude_p2mp_common()
local mcast_grp=$1; shift
local plen=$1; shift
local grp=$1; shift
+ local grp_dmac=$1; shift
local valid_src=$1; shift
local invalid_src=$1; shift
local mz=$1; shift
@@ -1635,12 +1651,12 @@ starg_exclude_p2mp_common()
run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $invalid_src dst $mcast_grp src_vni 10010 via veth0"
# Check that invalid source is not forwarded.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 0
log_test $? 0 "Block excluded source"
# Check that valid source is forwarded.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Forward valid source"
@@ -1648,7 +1664,7 @@ starg_exclude_p2mp_common()
run_cmd "ip -n $ns2 address del $mcast_grp/$plen dev veth0"
# Check that valid source is not received anymore.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Receive of valid source after removal from group"
}
@@ -1660,6 +1676,7 @@ starg_exclude_p2mp_ipv4_ipv4()
local mcast_grp=238.1.1.1
local plen=32
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local valid_src=192.0.2.129
local invalid_src=192.0.2.145
@@ -1667,7 +1684,7 @@ starg_exclude_p2mp_ipv4_ipv4()
echo "Data path: (*, G) EXCLUDE - P2MP - IPv4 overlay / IPv4 underlay"
echo "---------------------------------------------------------------"
- starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
+ starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
$valid_src $invalid_src "mausezahn"
}
@@ -1678,6 +1695,7 @@ starg_exclude_p2mp_ipv6_ipv4()
local mcast_grp=238.1.1.1
local plen=32
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local valid_src=2001:db8:100::1
local invalid_src=2001:db8:200::1
@@ -1685,7 +1703,7 @@ starg_exclude_p2mp_ipv6_ipv4()
echo "Data path: (*, G) EXCLUDE - P2MP - IPv6 overlay / IPv4 underlay"
echo "---------------------------------------------------------------"
- starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
+ starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
$valid_src $invalid_src "mausezahn -6"
}
@@ -1696,6 +1714,7 @@ starg_exclude_p2mp_ipv4_ipv6()
local mcast_grp=ff0e::2
local plen=128
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local valid_src=192.0.2.129
local invalid_src=192.0.2.145
@@ -1703,7 +1722,7 @@ starg_exclude_p2mp_ipv4_ipv6()
echo "Data path: (*, G) EXCLUDE - P2MP - IPv4 overlay / IPv6 underlay"
echo "---------------------------------------------------------------"
- starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
+ starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
$valid_src $invalid_src "mausezahn"
}
@@ -1714,6 +1733,7 @@ starg_exclude_p2mp_ipv6_ipv6()
local mcast_grp=ff0e::2
local plen=128
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local valid_src=2001:db8:100::1
local invalid_src=2001:db8:200::1
@@ -1721,7 +1741,7 @@ starg_exclude_p2mp_ipv6_ipv6()
echo "Data path: (*, G) EXCLUDE - P2MP - IPv6 overlay / IPv6 underlay"
echo "---------------------------------------------------------------"
- starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
+ starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
$valid_src $invalid_src "mausezahn -6"
}
@@ -1732,6 +1752,7 @@ starg_include_p2mp_common()
local mcast_grp=$1; shift
local plen=$1; shift
local grp=$1; shift
+ local grp_dmac=$1; shift
local valid_src=$1; shift
local invalid_src=$1; shift
local mz=$1; shift
@@ -1749,12 +1770,12 @@ starg_include_p2mp_common()
run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode include source_list $valid_src dst $mcast_grp src_vni 10010 via veth0"
# Check that invalid source is not forwarded.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 0
log_test $? 0 "Block excluded source"
# Check that valid source is forwarded.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Forward valid source"
@@ -1762,7 +1783,7 @@ starg_include_p2mp_common()
run_cmd "ip -n $ns2 address del $mcast_grp/$plen dev veth0"
# Check that valid source is not received anymore.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Receive of valid source after removal from group"
}
@@ -1774,6 +1795,7 @@ starg_include_p2mp_ipv4_ipv4()
local mcast_grp=238.1.1.1
local plen=32
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local valid_src=192.0.2.129
local invalid_src=192.0.2.145
@@ -1781,7 +1803,7 @@ starg_include_p2mp_ipv4_ipv4()
echo "Data path: (*, G) INCLUDE - P2MP - IPv4 overlay / IPv4 underlay"
echo "---------------------------------------------------------------"
- starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
+ starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
$valid_src $invalid_src "mausezahn"
}
@@ -1792,6 +1814,7 @@ starg_include_p2mp_ipv6_ipv4()
local mcast_grp=238.1.1.1
local plen=32
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local valid_src=2001:db8:100::1
local invalid_src=2001:db8:200::1
@@ -1799,7 +1822,7 @@ starg_include_p2mp_ipv6_ipv4()
echo "Data path: (*, G) INCLUDE - P2MP - IPv6 overlay / IPv4 underlay"
echo "---------------------------------------------------------------"
- starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
+ starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
$valid_src $invalid_src "mausezahn -6"
}
@@ -1810,6 +1833,7 @@ starg_include_p2mp_ipv4_ipv6()
local mcast_grp=ff0e::2
local plen=128
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local valid_src=192.0.2.129
local invalid_src=192.0.2.145
@@ -1817,7 +1841,7 @@ starg_include_p2mp_ipv4_ipv6()
echo "Data path: (*, G) INCLUDE - P2MP - IPv4 overlay / IPv6 underlay"
echo "---------------------------------------------------------------"
- starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
+ starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
$valid_src $invalid_src "mausezahn"
}
@@ -1828,6 +1852,7 @@ starg_include_p2mp_ipv6_ipv6()
local mcast_grp=ff0e::2
local plen=128
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local valid_src=2001:db8:100::1
local invalid_src=2001:db8:200::1
@@ -1835,7 +1860,7 @@ starg_include_p2mp_ipv6_ipv6()
echo "Data path: (*, G) INCLUDE - P2MP - IPv6 overlay / IPv6 underlay"
echo "---------------------------------------------------------------"
- starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
+ starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
$valid_src $invalid_src "mausezahn -6"
}
@@ -1847,6 +1872,7 @@ egress_vni_translation_common()
local plen=$1; shift
local proto=$1; shift
local grp=$1; shift
+ local grp_dmac=$1; shift
local src=$1; shift
local mz=$1; shift
@@ -1882,20 +1908,20 @@ egress_vni_translation_common()
# Make sure that packets sent from the first VTEP over VLAN 10 are
# received by the SVI corresponding to the L3VNI (14000 / VLAN 4000) on
# the second VTEP, since it is configured as PVID.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev br0.4000 ingress" 101 1
log_test $? 0 "Egress VNI translation - PVID configured"
# Remove PVID flag from VLAN 4000 on the second VTEP and make sure
# packets are no longer received by the SVI interface.
run_cmd "bridge -n $ns2 vlan add vid 4000 dev vx0"
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev br0.4000 ingress" 101 1
log_test $? 0 "Egress VNI translation - no PVID configured"
# Reconfigure the PVID and make sure packets are received again.
run_cmd "bridge -n $ns2 vlan add vid 4000 dev vx0 pvid"
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev br0.4000 ingress" 101 2
log_test $? 0 "Egress VNI translation - PVID reconfigured"
}
@@ -1908,6 +1934,7 @@ egress_vni_translation_ipv4_ipv4()
local plen=32
local proto="ipv4"
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local src=192.0.2.129
echo
@@ -1915,7 +1942,7 @@ egress_vni_translation_ipv4_ipv4()
echo "----------------------------------------------------------------"
egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \
- $src "mausezahn"
+ $grp_dmac $src "mausezahn"
}
egress_vni_translation_ipv6_ipv4()
@@ -1926,6 +1953,7 @@ egress_vni_translation_ipv6_ipv4()
local plen=32
local proto="ipv6"
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local src=2001:db8:100::1
echo
@@ -1933,7 +1961,7 @@ egress_vni_translation_ipv6_ipv4()
echo "----------------------------------------------------------------"
egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \
- $src "mausezahn -6"
+ $grp_dmac $src "mausezahn -6"
}
egress_vni_translation_ipv4_ipv6()
@@ -1944,6 +1972,7 @@ egress_vni_translation_ipv4_ipv6()
local plen=128
local proto="ipv4"
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local src=192.0.2.129
echo
@@ -1951,7 +1980,7 @@ egress_vni_translation_ipv4_ipv6()
echo "----------------------------------------------------------------"
egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \
- $src "mausezahn"
+ $grp_dmac $src "mausezahn"
}
egress_vni_translation_ipv6_ipv6()
@@ -1962,6 +1991,7 @@ egress_vni_translation_ipv6_ipv6()
local plen=128
local proto="ipv6"
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local src=2001:db8:100::1
echo
@@ -1969,7 +1999,7 @@ egress_vni_translation_ipv6_ipv6()
echo "----------------------------------------------------------------"
egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \
- $src "mausezahn -6"
+ $grp_dmac $src "mausezahn -6"
}
all_zeros_mdb_common()
@@ -1982,12 +2012,18 @@ all_zeros_mdb_common()
local vtep4_ip=$1; shift
local plen=$1; shift
local ipv4_grp=239.1.1.1
+ local ipv4_grp_dmac=01:00:5e:01:01:01
local ipv4_unreg_grp=239.2.2.2
+ local ipv4_unreg_grp_dmac=01:00:5e:02:02:02
local ipv4_ll_grp=224.0.0.100
+ local ipv4_ll_grp_dmac=01:00:5e:00:00:64
local ipv4_src=192.0.2.129
local ipv6_grp=ff0e::1
+ local ipv6_grp_dmac=33:33:00:00:00:01
local ipv6_unreg_grp=ff0e::2
+ local ipv6_unreg_grp_dmac=33:33:00:00:00:02
local ipv6_ll_grp=ff02::1
+ local ipv6_ll_grp_dmac=33:33:00:00:00:01
local ipv6_src=2001:db8:100::1
# Install all-zeros (catchall) MDB entries for IPv4 and IPv6 traffic
@@ -2023,7 +2059,7 @@ all_zeros_mdb_common()
# Send registered IPv4 multicast and make sure it only arrives to the
# first VTEP.
- run_cmd "ip netns exec $ns1 mausezahn br0.10 -A $ipv4_src -B $ipv4_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 mausezahn br0.10 -a own -b $ipv4_grp_dmac -A $ipv4_src -B $ipv4_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "Registered IPv4 multicast - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 0
@@ -2031,7 +2067,7 @@ all_zeros_mdb_common()
# Send unregistered IPv4 multicast that is not link-local and make sure
# it arrives to the first and second VTEPs.
- run_cmd "ip netns exec $ns1 mausezahn br0.10 -A $ipv4_src -B $ipv4_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 mausezahn br0.10 -a own -b $ipv4_unreg_grp_dmac -A $ipv4_src -B $ipv4_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 2
log_test $? 0 "Unregistered IPv4 multicast - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 1
@@ -2039,7 +2075,7 @@ all_zeros_mdb_common()
# Send IPv4 link-local multicast traffic and make sure it does not
# arrive to any VTEP.
- run_cmd "ip netns exec $ns1 mausezahn br0.10 -A $ipv4_src -B $ipv4_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 mausezahn br0.10 -a own -b $ipv4_ll_grp_dmac -A $ipv4_src -B $ipv4_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 2
log_test $? 0 "Link-local IPv4 multicast - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 1
@@ -2074,7 +2110,7 @@ all_zeros_mdb_common()
# Send registered IPv6 multicast and make sure it only arrives to the
# third VTEP.
- run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -A $ipv6_src -B $ipv6_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -a own -b $ipv6_grp_dmac -A $ipv6_src -B $ipv6_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 103 1
log_test $? 0 "Registered IPv6 multicast - third VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 104 0
@@ -2082,7 +2118,7 @@ all_zeros_mdb_common()
# Send unregistered IPv6 multicast that is not link-local and make sure
# it arrives to the third and fourth VTEPs.
- run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -A $ipv6_src -B $ipv6_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -a own -b $ipv6_unreg_grp_dmac -A $ipv6_src -B $ipv6_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 103 2
log_test $? 0 "Unregistered IPv6 multicast - third VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 104 1
@@ -2090,7 +2126,7 @@ all_zeros_mdb_common()
# Send IPv6 link-local multicast traffic and make sure it does not
# arrive to any VTEP.
- run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -A $ipv6_src -B $ipv6_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -a own -b $ipv6_ll_grp_dmac -A $ipv6_src -B $ipv6_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 103 2
log_test $? 0 "Link-local IPv6 multicast - third VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 104 1
@@ -2165,6 +2201,7 @@ mdb_fdb_common()
local plen=$1; shift
local proto=$1; shift
local grp=$1; shift
+ local grp_dmac=$1; shift
local src=$1; shift
local mz=$1; shift
@@ -2188,7 +2225,7 @@ mdb_fdb_common()
# Send IP multicast traffic and make sure it is forwarded by the MDB
# and only arrives to the first VTEP.
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "IP multicast - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 0
@@ -2205,7 +2242,7 @@ mdb_fdb_common()
# Remove the MDB entry and make sure that IP multicast is now forwarded
# by the FDB to the second VTEP.
run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep1_ip src_vni 10010"
- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
tc_check_packets "$ns2" "dev vx0 ingress" 101 1
log_test $? 0 "IP multicast after removal - first VTEP"
tc_check_packets "$ns2" "dev vx0 ingress" 102 2
@@ -2221,14 +2258,15 @@ mdb_fdb_ipv4_ipv4()
local plen=32
local proto="ipv4"
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local src=192.0.2.129
echo
echo "Data path: MDB with FDB - IPv4 overlay / IPv4 underlay"
echo "------------------------------------------------------"
- mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \
- "mausezahn"
+ mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp \
+ $grp_dmac $src "mausezahn"
}
mdb_fdb_ipv6_ipv4()
@@ -2240,14 +2278,15 @@ mdb_fdb_ipv6_ipv4()
local plen=32
local proto="ipv6"
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local src=2001:db8:100::1
echo
echo "Data path: MDB with FDB - IPv6 overlay / IPv4 underlay"
echo "------------------------------------------------------"
- mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \
- "mausezahn -6"
+ mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp \
+ $grp_dmac $src "mausezahn -6"
}
mdb_fdb_ipv4_ipv6()
@@ -2259,14 +2298,15 @@ mdb_fdb_ipv4_ipv6()
local plen=128
local proto="ipv4"
local grp=239.1.1.1
+ local grp_dmac=01:00:5e:01:01:01
local src=192.0.2.129
echo
echo "Data path: MDB with FDB - IPv4 overlay / IPv6 underlay"
echo "------------------------------------------------------"
- mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \
- "mausezahn"
+ mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp \
+ $grp_dmac $src "mausezahn"
}
mdb_fdb_ipv6_ipv6()
@@ -2278,14 +2318,15 @@ mdb_fdb_ipv6_ipv6()
local plen=128
local proto="ipv6"
local grp=ff0e::1
+ local grp_dmac=33:33:00:00:00:01
local src=2001:db8:100::1
echo
echo "Data path: MDB with FDB - IPv6 overlay / IPv6 underlay"
echo "------------------------------------------------------"
- mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \
- "mausezahn -6"
+ mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp \
+ $grp_dmac $src "mausezahn -6"
}
mdb_grp1_loop()
@@ -2320,7 +2361,9 @@ mdb_torture_common()
local vtep1_ip=$1; shift
local vtep2_ip=$1; shift
local grp1=$1; shift
+ local grp1_dmac=$1; shift
local grp2=$1; shift
+ local grp2_dmac=$1; shift
local src=$1; shift
local mz=$1; shift
local pid1
@@ -2345,9 +2388,9 @@ mdb_torture_common()
pid1=$!
mdb_grp2_loop $ns1 $vtep1_ip $vtep2_ip $grp2 &
pid2=$!
- ip netns exec $ns1 $mz br0.10 -A $src -B $grp1 -t udp sp=12345,dp=54321 -p 100 -c 0 -q &
+ ip netns exec $ns1 $mz br0.10 -a own -b $grp1_dmac -A $src -B $grp1 -t udp sp=12345,dp=54321 -p 100 -c 0 -q &
pid3=$!
- ip netns exec $ns1 $mz br0.10 -A $src -B $grp2 -t udp sp=12345,dp=54321 -p 100 -c 0 -q &
+ ip netns exec $ns1 $mz br0.10 -a own -b $grp2_dmac -A $src -B $grp2 -t udp sp=12345,dp=54321 -p 100 -c 0 -q &
pid4=$!
sleep 30
@@ -2363,15 +2406,17 @@ mdb_torture_ipv4_ipv4()
local vtep1_ip=198.51.100.100
local vtep2_ip=198.51.100.200
local grp1=239.1.1.1
+ local grp1_dmac=01:00:5e:01:01:01
local grp2=239.2.2.2
+ local grp2_dmac=01:00:5e:02:02:02
local src=192.0.2.129
echo
echo "Data path: MDB torture test - IPv4 overlay / IPv4 underlay"
echo "----------------------------------------------------------"
- mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \
- "mausezahn"
+ mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp1_dmac $grp2 \
+ $grp2_dmac $src "mausezahn"
}
mdb_torture_ipv6_ipv4()
@@ -2380,15 +2425,17 @@ mdb_torture_ipv6_ipv4()
local vtep1_ip=198.51.100.100
local vtep2_ip=198.51.100.200
local grp1=ff0e::1
+ local grp1_dmac=33:33:00:00:00:01
local grp2=ff0e::2
+ local grp2_dmac=33:33:00:00:00:02
local src=2001:db8:100::1
echo
echo "Data path: MDB torture test - IPv6 overlay / IPv4 underlay"
echo "----------------------------------------------------------"
- mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \
- "mausezahn -6"
+ mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp1_dmac $grp2 \
+ $grp2_dmac $src "mausezahn -6"
}
mdb_torture_ipv4_ipv6()
@@ -2397,15 +2444,17 @@ mdb_torture_ipv4_ipv6()
local vtep1_ip=2001:db8:1000::1
local vtep2_ip=2001:db8:2000::1
local grp1=239.1.1.1
+ local grp1_dmac=01:00:5e:01:01:01
local grp2=239.2.2.2
+ local grp2_dmac=01:00:5e:02:02:02
local src=192.0.2.129
echo
echo "Data path: MDB torture test - IPv4 overlay / IPv6 underlay"
echo "----------------------------------------------------------"
- mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \
- "mausezahn"
+ mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp1_dmac $grp2 \
+ $grp2_dmac $src "mausezahn"
}
mdb_torture_ipv6_ipv6()
@@ -2414,15 +2463,17 @@ mdb_torture_ipv6_ipv6()
local vtep1_ip=2001:db8:1000::1
local vtep2_ip=2001:db8:2000::1
local grp1=ff0e::1
+ local grp1_dmac=33:33:00:00:00:01
local grp2=ff0e::2
+ local grp2_dmac=33:33:00:00:00:02
local src=2001:db8:100::1
echo
echo "Data path: MDB torture test - IPv6 overlay / IPv6 underlay"
echo "----------------------------------------------------------"
- mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \
- "mausezahn -6"
+ mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp1_dmac $grp2 \
+ $grp2_dmac $src "mausezahn -6"
}
################################################################################
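
Every mausezahn invocation in this script now passes `-a own -b $grp_dmac`, giving the injected frames a deterministic source MAC and a destination MAC derived from the multicast group rather than whatever the local libnet defaults to. The MAC follows mechanically from the group address: 01:00:5e plus the low 23 bits for IPv4, 33:33 plus the low 32 bits for IPv6 (so ff0e::1 maps to 33:33:00:00:00:01). A small sketch of the IPv4 mapping, with a hypothetical helper name:

    #include <stdint.h>
    #include <stdio.h>

    /* IPv4: 01:00:5e + low 23 bits of the group address. */
    static void ipv4_mcast_mac(uint32_t grp, uint8_t mac[6])
    {
        mac[0] = 0x01; mac[1] = 0x00; mac[2] = 0x5e;
        mac[3] = (grp >> 16) & 0x7f;
        mac[4] = (grp >> 8) & 0xff;
        mac[5] = grp & 0xff;
    }

    int main(void)
    {
        uint8_t mac[6];

        ipv4_mcast_mac(0xef010101, mac);	/* 239.1.1.1 */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        /* prints 01:00:5e:01:01:01, matching $grp_dmac above */
        return 0;
    }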
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index c6eda21cefb6..f27a12d2a2c9 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -1615,6 +1615,40 @@ TEST_F(tls, getsockopt)
EXPECT_EQ(errno, EINVAL);
}
+TEST_F(tls, recv_efault)
+{
+ char *rec1 = "1111111111";
+ char *rec2 = "2222222222";
+ struct msghdr hdr = {};
+ struct iovec iov[2];
+ char recv_mem[12];
+ int ret;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ EXPECT_EQ(send(self->fd, rec1, 10, 0), 10);
+ EXPECT_EQ(send(self->fd, rec2, 10, 0), 10);
+
+ iov[0].iov_base = recv_mem;
+ iov[0].iov_len = sizeof(recv_mem);
+ iov[1].iov_base = NULL; /* broken iov to make process_rx_list fail */
+ iov[1].iov_len = 1;
+
+ hdr.msg_iovlen = 2;
+ hdr.msg_iov = iov;
+
+ EXPECT_EQ(recv(self->cfd, recv_mem, 1, 0), 1);
+ EXPECT_EQ(recv_mem[0], rec1[0]);
+
+ ret = recvmsg(self->cfd, &hdr, 0);
+ EXPECT_LE(ret, sizeof(recv_mem));
+ EXPECT_GE(ret, 9);
+ EXPECT_EQ(memcmp(rec1, recv_mem, 9), 0);
+ if (ret > 9)
+ EXPECT_EQ(memcmp(rec2, recv_mem + 9, ret - 9), 0);
+}
+
FIXTURE(tls_err)
{
int fd, cfd;
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index 380cb15e942e..83ed987cff34 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -244,7 +244,7 @@ for family in 4 6; do
create_vxlan_pair
ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on
ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on
- run_test "GRO frag list over UDP tunnel" $OL_NET$DST 1 1
+ run_test "GRO frag list over UDP tunnel" $OL_NET$DST 10 10
cleanup
# use NAT to circumvent GRO FWD check
@@ -258,13 +258,7 @@ for family in 4 6; do
# load arp cache before running the test to reduce the amount of
# stray traffic on top of the UDP tunnel
ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
- run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
- cleanup
-
- create_vxlan_pair
- run_bench "UDP tunnel fwd perf" $OL_NET$DST
- ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
- run_bench "UDP tunnel GRO fwd perf" $OL_NET$DST
+ run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST
cleanup
done
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index 1d975bf52af3..85b3baa3f7f3 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -34,7 +34,7 @@
#endif
#ifndef UDP_MAX_SEGMENTS
-#define UDP_MAX_SEGMENTS (1 << 6UL)
+#define UDP_MAX_SEGMENTS (1 << 7UL)
#endif
#define CONST_MTU_TEST 1500
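
UDP_MAX_SEGMENTS mirrors the kernel's cap on how many segments a single GSO send may produce, raised from 64 to 128; the test's fallback define moves in step. For reference, a hedged sketch of the send path the limit constrains — send_gso is a hypothetical helper, and the UDP_SEGMENT value is copied from the uapi header in case libc lacks it:

    #include <netinet/in.h>
    #include <stddef.h>
    #include <sys/socket.h>

    #ifndef UDP_SEGMENT
    #define UDP_SEGMENT 103		/* from linux/udp.h */
    #endif

    static ssize_t send_gso(int fd, const struct sockaddr_in *dst,
                            const char *payload, size_t len,
                            unsigned int gso_size)
    {
        if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
                       &gso_size, sizeof(gso_size)))
            return -1;
        /* The kernel slices this into ceil(len / gso_size) datagrams;
         * that count must not exceed UDP_MAX_SEGMENTS (now 128). */
        return sendto(fd, payload, len, 0,
                      (const struct sockaddr *)dst, sizeof(*dst));
    }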
diff --git a/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c b/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c
index 505294da1b9f..d6f99eb9be65 100644
--- a/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c
+++ b/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c
@@ -154,7 +154,7 @@ static int dev_papr_vpd_null_handle(void)
static int papr_vpd_close_handle_without_reading(void)
{
const int devfd = open(DEVPATH, O_RDONLY);
- struct papr_location_code lc;
+ struct papr_location_code lc = { .str = "", };
int fd;
SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
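
The fix replaces an uninitialized on-stack struct, previously handed to the ioctl as-is, with a designated initializer. A sketch of the C semantics relied on (the struct layout here is a stand-in, not the real uapi definition):

struct papr_location_code { char str[80]; };	/* stand-in layout */

void prepare_arg(void)
{
	/* naming one member zero-initializes all the rest, so the
	 * kernel sees a valid empty string, not stack garbage */
	struct papr_location_code lc = { .str = "", };
	(void)lc;
}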
diff --git a/tools/testing/selftests/riscv/hwprobe/cbo.c b/tools/testing/selftests/riscv/hwprobe/cbo.c
index c537d52fafc5..a40541bb7c7d 100644
--- a/tools/testing/selftests/riscv/hwprobe/cbo.c
+++ b/tools/testing/selftests/riscv/hwprobe/cbo.c
@@ -19,7 +19,7 @@
#include "hwprobe.h"
#include "../../kselftest.h"
-#define MK_CBO(fn) cpu_to_le32((fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15)
+#define MK_CBO(fn) le32_bswap((uint32_t)(fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15)
static char mem[4096] __aligned(4096) = { [0 ... 4095] = 0xa5 };
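
MK_CBO() changes in two ways: the kernel-only cpu_to_le32() becomes the new userspace le32_bswap(), and fn is cast to uint32_t so the shift happens in an unsigned type. A sketch of the fields the macro packs, assuming the standard RISC-V CMO encoding (function name is mine):

#include <stdint.h>

static uint32_t mk_cbo(uint32_t fn)
{
	return fn << 20 |	/* imm[11:0]: which cbo.* operation */
	       10 << 15 |	/* rs1 = x10 (a0), address register */
	        2 << 12 |	/* funct3 = 2                       */
	        0 <<  7 |	/* rd = x0                          */
	            15;		/* opcode 0x0f (MISC-MEM)           */
}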
diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.h b/tools/testing/selftests/riscv/hwprobe/hwprobe.h
index e3fccb390c4d..f3de970c3222 100644
--- a/tools/testing/selftests/riscv/hwprobe/hwprobe.h
+++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.h
@@ -4,6 +4,16 @@
#include <stddef.h>
#include <asm/hwprobe.h>
+#if __BYTE_ORDER == __BIG_ENDIAN
+# define le32_bswap(_x) \
+ ((((_x) & 0x000000ffU) << 24) | \
+ (((_x) & 0x0000ff00U) << 8) | \
+ (((_x) & 0x00ff0000U) >> 8) | \
+ (((_x) & 0xff000000U) >> 24))
+#else
+# define le32_bswap(_x) (_x)
+#endif
+
/*
* Rather than relying on having a new enough libc to define this, just do it
* ourselves. This way we don't need to be coupled to a new-enough libc to
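
A minimal standalone check of what the macro buys (substituting GCC's __builtin_bswap32 for the masked shifts; <endian.h> is the glibc home of __BYTE_ORDER): whatever the host endianness, the swapped instruction word lands in memory LSB-first, which is what the cbo test needs when it stores instruction bytes.

#include <assert.h>
#include <endian.h>
#include <stdint.h>
#include <string.h>

#if __BYTE_ORDER == __BIG_ENDIAN
# define le32_bswap(_x) __builtin_bswap32(_x)
#else
# define le32_bswap(_x) (_x)
#endif

int main(void)
{
	uint32_t insn = le32_bswap(UINT32_C(0x12345678));
	uint8_t raw[4];

	memcpy(raw, &insn, sizeof(raw));
	assert(raw[0] == 0x78 && raw[3] == 0x12);	/* LSB first */
	return 0;
}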
diff --git a/tools/testing/selftests/seccomp/settings b/tools/testing/selftests/seccomp/settings
index 6091b45d226b..a953c96aa16e 100644
--- a/tools/testing/selftests/seccomp/settings
+++ b/tools/testing/selftests/seccomp/settings
@@ -1 +1 @@
-timeout=120
+timeout=180
diff --git a/tools/testing/selftests/syscall_user_dispatch/sud_test.c b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
index b5d592d4099e..d975a6767329 100644
--- a/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+++ b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
@@ -158,6 +158,20 @@ static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
/* In preparation for sigreturn. */
SYSCALL_DISPATCH_OFF(glob_sel);
+
+ /*
+ * The tests for argument handling assume that `syscall(x) == x`. This
+ * is a NOP on x86 because the syscall number is passed in %rax, which
+ * happens to also be the function ABI return register. Other
+ * architectures may need to swizzle the arguments around.
+ */
+#if defined(__riscv)
+/* REG_A7 is not defined in libc headers */
+# define REG_A7 (REG_A0 + 7)
+
+ ((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A0] =
+ ((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A7];
+#endif
}
TEST(dispatch_and_return)
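
The fixup works because riscv receives the syscall number in a7 but returns values in a0, while x86 uses %rax for both, so `syscall(x) == x` holds there for free. For reference, a hedged sketch of the arch-neutral alternative the test does not use: SIGSYS also reports the trapped number through siginfo.

#define _GNU_SOURCE
#include <signal.h>

static void show_trapped_nr(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig; (void)ucontext;
	/* filled in for SIGSYS on every arch, independent of which
	 * register the syscall ABI happens to use */
	int nr = info->si_syscall;
	(void)nr;
}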
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index d49dd3ffd0d9..c001dd79179d 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -66,7 +66,7 @@ static int check_diff(struct timeval start, struct timeval end)
diff = end.tv_usec - start.tv_usec;
diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
- if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
+ if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
printf("Diff too high: %lld..", diff);
return -1;
}
@@ -184,80 +184,71 @@ static int check_timer_create(int which)
return 0;
}
-int remain;
-__thread int got_signal;
+static pthread_t ctd_thread;
+static volatile int ctd_count, ctd_failed;
-static void *distribution_thread(void *arg)
+static void ctd_sighandler(int sig)
{
- while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
- return NULL;
+ if (pthread_self() != ctd_thread)
+ ctd_failed = 1;
+ ctd_count--;
}
-static void distribution_handler(int nr)
+static void *ctd_thread_func(void *arg)
{
- if (!__atomic_exchange_n(&got_signal, 1, __ATOMIC_RELAXED))
- __atomic_fetch_sub(&remain, 1, __ATOMIC_RELAXED);
-}
-
-/*
- * Test that all running threads _eventually_ receive CLOCK_PROCESS_CPUTIME_ID
- * timer signals. This primarily tests that the kernel does not favour any one.
- */
-static int check_timer_distribution(void)
-{
- int err, i;
- timer_t id;
- const int nthreads = 10;
- pthread_t threads[nthreads];
struct itimerspec val = {
.it_value.tv_sec = 0,
.it_value.tv_nsec = 1000 * 1000,
.it_interval.tv_sec = 0,
.it_interval.tv_nsec = 1000 * 1000,
};
+ timer_t id;
- remain = nthreads + 1; /* worker threads + this thread */
- signal(SIGALRM, distribution_handler);
- err = timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id);
- if (err < 0) {
- ksft_perror("Can't create timer");
- return -1;
- }
- err = timer_settime(id, 0, &val, NULL);
- if (err < 0) {
- ksft_perror("Can't set timer");
- return -1;
- }
+ /* 10 ms to ensure the leader sleeps */
+ usleep(10000);
- for (i = 0; i < nthreads; i++) {
- err = pthread_create(&threads[i], NULL, distribution_thread,
- NULL);
- if (err) {
- ksft_print_msg("Can't create thread: %s (%d)\n",
- strerror(errno), errno);
- return -1;
- }
- }
+ ctd_count = 100;
+ if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id))
+ return "Can't create timer\n";
+ if (timer_settime(id, 0, &val, NULL))
+ return "Can't set timer\n";
- /* Wait for all threads to receive the signal. */
- while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
+ while (ctd_count > 0 && !ctd_failed)
+ ;
- for (i = 0; i < nthreads; i++) {
- err = pthread_join(threads[i], NULL);
- if (err) {
- ksft_print_msg("Can't join thread: %s (%d)\n",
- strerror(errno), errno);
- return -1;
- }
- }
+ if (timer_delete(id))
+ return "Can't delete timer\n";
- if (timer_delete(id)) {
- ksft_perror("Can't delete timer");
- return -1;
- }
+ return NULL;
+}
+
+/*
+ * Test that only the running thread receives the timer signal.
+ */
+static int check_timer_distribution(void)
+{
+ const char *errmsg;
- ksft_test_result_pass("check_timer_distribution\n");
+ signal(SIGALRM, ctd_sighandler);
+
+ errmsg = "Can't create thread\n";
+ if (pthread_create(&ctd_thread, NULL, ctd_thread_func, NULL))
+ goto err;
+
+ errmsg = "Can't join thread\n";
+ if (pthread_join(ctd_thread, (void **)&errmsg) || errmsg)
+ goto err;
+
+ if (!ctd_failed)
+ ksft_test_result_pass("check signal distribution\n");
+ else if (ksft_min_kernel_version(6, 3))
+ ksft_test_result_fail("check signal distribution\n");
+ else
+ ksft_test_result_skip("check signal distribution (old kernel)\n");
return 0;
+err:
+ ksft_print_msg("%s", errmsg);
+ return -1;
}
int main(int argc, char **argv)
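
The rewritten check arms a 1 ms periodic CLOCK_PROCESS_CPUTIME_ID timer from the busy thread while the leader sleeps in pthread_join(), and fails if any SIGALRM lands on another thread; older kernels delivered such signals to an arbitrary thread, so the result only counts as a failure on v6.3 or newer. A condensed sketch of the timer setup (helper name is mine; a NULL sigevent defaults to SIGALRM):

#include <signal.h>
#include <time.h>

static int arm_cputime_timer(timer_t *id)
{
	struct itimerspec val = {
		.it_value.tv_nsec    = 1000 * 1000,	/* first expiry: 1 ms */
		.it_interval.tv_nsec = 1000 * 1000,	/* re-arm every 1 ms  */
	};

	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, id))
		return -1;
	return timer_settime(*id, 0, &val, NULL);
}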
diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
index 48b9a803235a..d13ebde20322 100644
--- a/tools/testing/selftests/timers/valid-adjtimex.c
+++ b/tools/testing/selftests/timers/valid-adjtimex.c
@@ -21,9 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-
-
-
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
@@ -62,45 +59,47 @@ int clear_time_state(void)
#define NUM_FREQ_OUTOFRANGE 4
#define NUM_FREQ_INVALID 2
+#define SHIFTED_PPM (1 << 16)
+
long valid_freq[NUM_FREQ_VALID] = {
- -499<<16,
- -450<<16,
- -400<<16,
- -350<<16,
- -300<<16,
- -250<<16,
- -200<<16,
- -150<<16,
- -100<<16,
- -75<<16,
- -50<<16,
- -25<<16,
- -10<<16,
- -5<<16,
- -1<<16,
+ -499 * SHIFTED_PPM,
+ -450 * SHIFTED_PPM,
+ -400 * SHIFTED_PPM,
+ -350 * SHIFTED_PPM,
+ -300 * SHIFTED_PPM,
+ -250 * SHIFTED_PPM,
+ -200 * SHIFTED_PPM,
+ -150 * SHIFTED_PPM,
+ -100 * SHIFTED_PPM,
+ -75 * SHIFTED_PPM,
+ -50 * SHIFTED_PPM,
+ -25 * SHIFTED_PPM,
+ -10 * SHIFTED_PPM,
+ -5 * SHIFTED_PPM,
+ -1 * SHIFTED_PPM,
-1000,
- 1<<16,
- 5<<16,
- 10<<16,
- 25<<16,
- 50<<16,
- 75<<16,
- 100<<16,
- 150<<16,
- 200<<16,
- 250<<16,
- 300<<16,
- 350<<16,
- 400<<16,
- 450<<16,
- 499<<16,
+ 1 * SHIFTED_PPM,
+ 5 * SHIFTED_PPM,
+ 10 * SHIFTED_PPM,
+ 25 * SHIFTED_PPM,
+ 50 * SHIFTED_PPM,
+ 75 * SHIFTED_PPM,
+ 100 * SHIFTED_PPM,
+ 150 * SHIFTED_PPM,
+ 200 * SHIFTED_PPM,
+ 250 * SHIFTED_PPM,
+ 300 * SHIFTED_PPM,
+ 350 * SHIFTED_PPM,
+ 400 * SHIFTED_PPM,
+ 450 * SHIFTED_PPM,
+ 499 * SHIFTED_PPM,
};
long outofrange_freq[NUM_FREQ_OUTOFRANGE] = {
- -1000<<16,
- -550<<16,
- 550<<16,
- 1000<<16,
+ -1000 * SHIFTED_PPM,
+ -550 * SHIFTED_PPM,
+ 550 * SHIFTED_PPM,
+ 1000 * SHIFTED_PPM,
};
#define LONG_MAX (~0UL>>1)
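
The table rewrite is more than cosmetic: the adjtimex freq field is ppm in 16.16 fixed point, and left-shifting a negative constant such as -499<<16 is undefined behaviour in C, while the equivalent multiplication is fully defined. A one-liner making the point (helper name is mine):

#define SHIFTED_PPM (1 << 16)	/* 1 ppm in the 16.16 fixed format */

static long ppm_to_freq(long ppm)
{
	return ppm * SHIFTED_PPM;	/* defined for ppm < 0, unlike << */
}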
diff --git a/tools/testing/selftests/turbostat/defcolumns.py b/tools/testing/selftests/turbostat/defcolumns.py
new file mode 100755
index 000000000000..d9b042097da7
--- /dev/null
+++ b/tools/testing/selftests/turbostat/defcolumns.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import subprocess
+from shutil import which
+
+turbostat = which('turbostat')
+if turbostat is None:
+ print('Could not find turbostat binary')
+ exit(1)
+
+timeout = which('timeout')
+if timeout is None:
+ print('Could not find timeout binary')
+ exit(1)
+
+proc_turbostat = subprocess.run([turbostat, '--list'], capture_output = True)
+if proc_turbostat.returncode != 0:
+ print(f'turbostat failed with {proc_turbostat.returncode}')
+ exit(1)
+
+#
+# By default, --list also reports the "usec" and "Time_Of_Day_Seconds" columns,
+# which are only visible when running with --debug.
+#
+expected_columns_debug = proc_turbostat.stdout.replace(b',', b'\t').strip()
+expected_columns = expected_columns_debug \
+ .replace(b'usec\t', b'').replace(b'Time_Of_Day_Seconds\t', b'') \
+ .replace(b'X2APIC\t', b'').replace(b'APIC\t', b'')
+
+#
+# Run turbostat with no options for 1 second, then stop it with SIGINT
+#
+timeout_argv = [timeout, '--preserve-status', '-s', 'SIGINT', '-k', '3', '1s']
+turbostat_argv = [turbostat, '-i', '0.250']
+
+print(f'Running turbostat with {turbostat_argv=}... ', end = '', flush = True)
+proc_turbostat = subprocess.run(timeout_argv + turbostat_argv, capture_output = True)
+if proc_turbostat.returncode != 0:
+ print(f'turbostat failed with {proc_turbostat.returncode}')
+ exit(1)
+actual_columns = proc_turbostat.stdout.split(b'\n')[0]
+if expected_columns != actual_columns:
+ print(f'turbostat column check failed\n{expected_columns=}\n{actual_columns=}')
+ exit(1)
+print('OK')
+
+#
+# Same, but with --debug
+#
+turbostat_argv.append('--debug')
+
+print(f'Running turbostat with {turbostat_argv=}... ', end = '', flush = True)
+proc_turbostat = subprocess.run(timeout_argv + turbostat_argv, capture_output = True)
+if proc_turbostat.returncode != 0:
+ print(f'turbostat failed with {proc_turbostat.returncode}')
+ exit(1)
+actual_columns = proc_turbostat.stdout.split(b'\n')[0]
+if expected_columns_debug != actual_columns:
+ print(f'turbostat column check failed\n{expected_columns_debug=}\n{actual_columns=}')
+ exit(1)
+print('OK')
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fb49c2a60200..ff0a20565f90 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -832,8 +832,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* mn_active_invalidate_count (see above) instead of
* mmu_invalidate_in_progress.
*/
- gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
- hva_range.may_block);
+ gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
/*
* If one or more memslots were found and thus zapped, notify arch code
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index ecefc7ec51af..715f19669d01 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -26,13 +26,11 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end,
- bool may_block);
+ unsigned long end);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end,
- bool may_block)
+ unsigned long end)
{
}
#endif /* HAVE_KVM_PFNCACHE */
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 4e07112a24c2..e3453e869e92 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -23,7 +23,7 @@
* MMU notifier 'invalidate_range_start' hook.
*/
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
- unsigned long end, bool may_block)
+ unsigned long end)
{
struct gfn_to_pfn_cache *gpc;
@@ -57,6 +57,19 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
spin_unlock(&kvm->gpc_lock);
}
+static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
+ unsigned long len)
+{
+ unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
+ offset_in_page(gpa);
+
+ /*
+ * The cached access must fit within a single page. The 'len' argument
+ * to activate() and refresh() exists only to enforce that.
+ */
+ return offset + len <= PAGE_SIZE;
+}
+
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
@@ -74,7 +87,7 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
if (kvm_is_error_hva(gpc->uhva))
return false;
- if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
+ if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
return false;
if (!gpc->valid)
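
kvm_gpc_is_valid_len() centralizes the single-page rule so that check(), refresh() and activate() all enforce it now that __kvm_gpc_refresh() no longer takes a length. A worked example of the arithmetic (standalone stand-ins for the kernel's PAGE_SIZE/offset_in_page(), 4 KiB pages assumed):

#define PAGE_SIZE 0x1000UL
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

static int fits_in_one_page(unsigned long addr, unsigned long len)
{
	/* e.g. addr 0x1000f00, len 0x200: 0xf00 + 0x200 = 0x1100
	 * crosses the page boundary, so it must be refused */
	return offset_in_page(addr) + len <= PAGE_SIZE;
}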
@@ -232,8 +245,7 @@ out_error:
return -EFAULT;
}
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
- unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
{
unsigned long page_offset;
bool unmap_old = false;
@@ -247,15 +259,6 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
return -EINVAL;
- /*
- * The cached acces must fit within a single page. The 'len' argument
- * exists only to enforce that.
- */
- page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
- offset_in_page(gpa);
- if (page_offset + len > PAGE_SIZE)
- return -EINVAL;
-
lockdep_assert_held(&gpc->refresh_lock);
write_lock_irq(&gpc->lock);
@@ -270,6 +273,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
if (kvm_is_error_gpa(gpa)) {
+ page_offset = offset_in_page(uhva);
+
gpc->gpa = INVALID_GPA;
gpc->memslot = NULL;
gpc->uhva = PAGE_ALIGN_DOWN(uhva);
@@ -279,6 +284,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
} else {
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
+ page_offset = offset_in_page(gpa);
+
if (gpc->gpa != gpa || gpc->generation != slots->generation ||
kvm_is_error_hva(gpc->uhva)) {
gfn_t gfn = gpa_to_gfn(gpa);
@@ -354,6 +361,9 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
guard(mutex)(&gpc->refresh_lock);
+ if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
+ return -EINVAL;
+
/*
* If the GPA is valid then ignore the HVA, as a cache can be GPA-based
* or HVA-based, not both. For GPA-based caches, the HVA will be
@@ -361,7 +371,7 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
*/
uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
- return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
+ return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
}
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
@@ -381,6 +391,9 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned
{
struct kvm *kvm = gpc->kvm;
+ if (!kvm_gpc_is_valid_len(gpa, uhva, len))
+ return -EINVAL;
+
guard(mutex)(&gpc->refresh_lock);
if (!gpc->active) {
@@ -400,11 +413,18 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return __kvm_gpc_refresh(gpc, gpa, uhva, len);
+ return __kvm_gpc_refresh(gpc, gpa, uhva);
}
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
+ /*
+ * Explicitly disallow INVALID_GPA so that the magic value can be used
+ * by KVM to differentiate between GPA-based and HVA-based caches.
+ */
+ if (WARN_ON_ONCE(kvm_is_error_gpa(gpa)))
+ return -EINVAL;
+
return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
}
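
The new WARN_ON_ONCE guard exists because INVALID_GPA doubles as the marker that a cache is HVA-based; letting a caller activate "by GPA" with that value would blur the distinction every later lookup depends on. A condensed sketch (constant as KVM defines it; type and helper name are stand-ins):

typedef unsigned long long gpa_t;
#define INVALID_GPA (~(gpa_t)0)

static int cache_is_hva_based(gpa_t gpa)
{
	/* the sentinel a GPA-flavoured activate must never accept */
	return gpa == INVALID_GPA;
}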